drm/vmwgfx: Read bounding box memory from the appropriate register
deliverable/linux.git: drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
1 /**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28 #include "vmwgfx_drv.h"
29 #include <drm/vmwgfx_drm.h>
30 #include <drm/ttm/ttm_object.h>
31 #include <drm/ttm/ttm_placement.h>
32 #include <drm/drmP.h>
33 #include "vmwgfx_resource_priv.h"
34
35 #define VMW_RES_EVICT_ERR_COUNT 10
36
37 struct vmw_user_dma_buffer {
38 struct ttm_prime_object prime;
39 struct vmw_dma_buffer dma;
40 };
41
42 struct vmw_bo_user_rep {
43 uint32_t handle;
44 uint64_t map_handle;
45 };
46
47 struct vmw_stream {
48 struct vmw_resource res;
49 uint32_t stream_id;
50 };
51
52 struct vmw_user_stream {
53 struct ttm_base_object base;
54 struct vmw_stream stream;
55 };
56
57
58 static uint64_t vmw_user_stream_size;
59
60 static const struct vmw_res_func vmw_stream_func = {
61 .res_type = vmw_res_stream,
62 .needs_backup = false,
63 .may_evict = false,
64 .type_name = "video streams",
65 .backup_placement = NULL,
66 .create = NULL,
67 .destroy = NULL,
68 .bind = NULL,
69 .unbind = NULL
70 };
71
72 static inline struct vmw_dma_buffer *
73 vmw_dma_buffer(struct ttm_buffer_object *bo)
74 {
75 return container_of(bo, struct vmw_dma_buffer, base);
76 }
77
78 static inline struct vmw_user_dma_buffer *
79 vmw_user_dma_buffer(struct ttm_buffer_object *bo)
80 {
81 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
82 return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
83 }
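/*
 * Illustrative sketch (not part of the driver): TTM callbacks hand back
 * the embedded struct ttm_buffer_object, and the container_of() helpers
 * above recover the wrapping vmwgfx object. A hypothetical destroy
 * callback would look like:
 *
 *	static void example_destroy(struct ttm_buffer_object *bo)
 *	{
 *		struct vmw_dma_buffer *vbo = vmw_dma_buffer(bo);
 *
 *		kfree(vbo);
 *	}
 *
 * vmw_dmabuf_bo_free() below is the real instance of this pattern.
 */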
84
85 struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
86 {
87 kref_get(&res->kref);
88 return res;
89 }
90
91
92 /**
93 * vmw_resource_release_id - release a resource id to the id manager.
94 *
95 * @res: Pointer to the resource.
96 *
97 * Release the resource id to the resource id manager and set @res->id to -1.
98 */
99 void vmw_resource_release_id(struct vmw_resource *res)
100 {
101 struct vmw_private *dev_priv = res->dev_priv;
102 struct idr *idr = &dev_priv->res_idr[res->func->res_type];
103
104 write_lock(&dev_priv->resource_lock);
105 if (res->id != -1)
106 idr_remove(idr, res->id);
107 res->id = -1;
108 write_unlock(&dev_priv->resource_lock);
109 }
110
111 static void vmw_resource_release(struct kref *kref)
112 {
113 struct vmw_resource *res =
114 container_of(kref, struct vmw_resource, kref);
115 struct vmw_private *dev_priv = res->dev_priv;
116 int id;
117 struct idr *idr = &dev_priv->res_idr[res->func->res_type];
118
119 res->avail = false;
120 list_del_init(&res->lru_head);
121 write_unlock(&dev_priv->resource_lock);
122 if (res->backup) {
123 struct ttm_buffer_object *bo = &res->backup->base;
124
125 ttm_bo_reserve(bo, false, false, false, 0);
126 if (!list_empty(&res->mob_head) &&
127 res->func->unbind != NULL) {
128 struct ttm_validate_buffer val_buf;
129
130 val_buf.bo = bo;
131 res->func->unbind(res, false, &val_buf);
132 }
133 res->backup_dirty = false;
134 list_del_init(&res->mob_head);
135 ttm_bo_unreserve(bo);
136 vmw_dmabuf_unreference(&res->backup);
137 }
138
139 if (likely(res->hw_destroy != NULL))
140 res->hw_destroy(res);
141
142 id = res->id;
143 if (res->res_free != NULL)
144 res->res_free(res);
145 else
146 kfree(res);
147
148 write_lock(&dev_priv->resource_lock);
149
150 if (id != -1)
151 idr_remove(idr, id);
152 }
153
154 void vmw_resource_unreference(struct vmw_resource **p_res)
155 {
156 struct vmw_resource *res = *p_res;
157 struct vmw_private *dev_priv = res->dev_priv;
158
159 *p_res = NULL;
160 write_lock(&dev_priv->resource_lock);
161 kref_put(&res->kref, vmw_resource_release);
162 write_unlock(&dev_priv->resource_lock);
163 }
164
165
166 /**
167 * vmw_resource_alloc_id - allocate a resource id from the id manager.
168 *
169 * @res: Pointer to the resource.
170 *
171 * Allocate the lowest free resource id from the id manager, and set
172 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
173 */
174 int vmw_resource_alloc_id(struct vmw_resource *res)
175 {
176 struct vmw_private *dev_priv = res->dev_priv;
177 int ret;
178 struct idr *idr = &dev_priv->res_idr[res->func->res_type];
179
180 BUG_ON(res->id != -1);
181
182 idr_preload(GFP_KERNEL);
183 write_lock(&dev_priv->resource_lock);
184
185 ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
186 if (ret >= 0)
187 res->id = ret;
188
189 write_unlock(&dev_priv->resource_lock);
190 idr_preload_end();
191 return ret < 0 ? ret : 0;
192 }
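/*
 * The preload/alloc pair above is the standard idiom for allocating an
 * idr id under a spinlock: idr_preload() pre-allocates with GFP_KERNEL
 * while sleeping is still allowed, so the idr_alloc() inside the write
 * lock can use GFP_NOWAIT. A minimal stand-alone sketch (my_lock and
 * my_idr are hypothetical names):
 *
 *	idr_preload(GFP_KERNEL);
 *	spin_lock(&my_lock);
 *	id = idr_alloc(&my_idr, ptr, 1, 0, GFP_NOWAIT);
 *	spin_unlock(&my_lock);
 *	idr_preload_end();
 *
 * As in the call above, ids start at 1 so that -1 can keep its
 * "no id allocated" meaning.
 */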
193
194 /**
195 * vmw_resource_init - initialize a struct vmw_resource
196 *
197 * @dev_priv: Pointer to a device private struct.
198 * @res: The struct vmw_resource to initialize.
200 * @delay_id: Boolean whether to defer device id allocation until
201 * the first validation.
202 * @res_free: Resource destructor.
203 * @func: Resource function table.
204 */
205 int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
206 bool delay_id,
207 void (*res_free) (struct vmw_resource *res),
208 const struct vmw_res_func *func)
209 {
210 kref_init(&res->kref);
211 res->hw_destroy = NULL;
212 res->res_free = res_free;
213 res->avail = false;
214 res->dev_priv = dev_priv;
215 res->func = func;
216 INIT_LIST_HEAD(&res->lru_head);
217 INIT_LIST_HEAD(&res->mob_head);
218 res->id = -1;
219 res->backup = NULL;
220 res->backup_offset = 0;
221 res->backup_dirty = false;
222 res->res_dirty = false;
223 if (delay_id)
224 return 0;
225 else
226 return vmw_resource_alloc_id(res);
227 }
228
229 /**
230 * vmw_resource_activate - activate a resource
231 *
232 * @res: Pointer to the newly created resource
233 * @hw_destroy: Destroy function. NULL if none.
234 *
235 * Activate a resource after the hardware has been made aware of it.
236 * Set the destroy function to @hw_destroy. Typically this frees the
237 * resource and destroys the hardware resources associated with it.
238 * Activate basically means that the function vmw_resource_lookup will
239 * find it.
240 */
241 void vmw_resource_activate(struct vmw_resource *res,
242 void (*hw_destroy) (struct vmw_resource *))
243 {
244 struct vmw_private *dev_priv = res->dev_priv;
245
246 write_lock(&dev_priv->resource_lock);
247 res->avail = true;
248 res->hw_destroy = hw_destroy;
249 write_unlock(&dev_priv->resource_lock);
250 }
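/*
 * Taken together, a resource is typically brought up like this
 * (illustrative sketch; my_res_free, my_func and my_hw_destroy are
 * hypothetical names, error handling omitted):
 *
 *	ret = vmw_resource_init(dev_priv, res, false, my_res_free, &my_func);
 *	if (ret)
 *		return ret;
 *	... make the device aware of the resource ...
 *	vmw_resource_activate(res, my_hw_destroy);
 *
 * Only after activation will vmw_resource_lookup() find the resource.
 * vmw_stream_init() below follows exactly this sequence.
 */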
251
252 struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
253 struct idr *idr, int id)
254 {
255 struct vmw_resource *res;
256
257 read_lock(&dev_priv->resource_lock);
258 res = idr_find(idr, id);
259 if (res && res->avail)
260 kref_get(&res->kref);
261 else
262 res = NULL;
263 read_unlock(&dev_priv->resource_lock);
264
265 if (unlikely(res == NULL))
266 return NULL;
267
268 return res;
269 }
270
271 /**
272 * vmw_user_resource_lookup_handle - lookup a struct resource from a
273 * TTM user-space handle and perform basic type checks
274 *
275 * @dev_priv: Pointer to a device private struct
276 * @tfile: Pointer to a struct ttm_object_file identifying the caller
277 * @handle: The TTM user-space handle
278 * @converter: Pointer to an object describing the resource type
279 * @p_res: On successful return the location pointed to will contain
280 * a pointer to a refcounted struct vmw_resource.
281 *
282 * If the handle can't be found or is associated with an incorrect resource
283 * type, -EINVAL will be returned.
284 */
285 int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
286 struct ttm_object_file *tfile,
287 uint32_t handle,
288 const struct vmw_user_resource_conv
289 *converter,
290 struct vmw_resource **p_res)
291 {
292 struct ttm_base_object *base;
293 struct vmw_resource *res;
294 int ret = -EINVAL;
295
296 base = ttm_base_object_lookup(tfile, handle);
297 if (unlikely(base == NULL))
298 return -EINVAL;
299
300 if (unlikely(ttm_base_object_type(base) != converter->object_type))
301 goto out_bad_resource;
302
303 res = converter->base_obj_to_res(base);
304
305 read_lock(&dev_priv->resource_lock);
306 if (!res->avail || res->res_free != converter->res_free) {
307 read_unlock(&dev_priv->resource_lock);
308 goto out_bad_resource;
309 }
310
311 kref_get(&res->kref);
312 read_unlock(&dev_priv->resource_lock);
313
314 *p_res = res;
315 ret = 0;
316
317 out_bad_resource:
318 ttm_base_object_unref(&base);
319
320 return ret;
321 }
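/*
 * Typical ioctl-side use of the lookup above (sketch; "handle" comes
 * from user space, user_surface_converter is used the same way later in
 * this file):
 *
 *	struct vmw_resource *res = NULL;
 *	int ret;
 *
 *	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
 *					      user_surface_converter, &res);
 *	if (ret)
 *		return ret;
 *	... use the refcounted res ...
 *	vmw_resource_unreference(&res);
 */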
322
323 /**
324 * Helper function that looks up either a surface or a dmabuf.
325 *
326 * The pointers pointed to by @out_surf and @out_buf need to be NULL.
327 */
328 int vmw_user_lookup_handle(struct vmw_private *dev_priv,
329 struct ttm_object_file *tfile,
330 uint32_t handle,
331 struct vmw_surface **out_surf,
332 struct vmw_dma_buffer **out_buf)
333 {
334 struct vmw_resource *res;
335 int ret;
336
337 BUG_ON(*out_surf || *out_buf);
338
339 ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
340 user_surface_converter,
341 &res);
342 if (!ret) {
343 *out_surf = vmw_res_to_srf(res);
344 return 0;
345 }
346
347 *out_surf = NULL;
348 ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
349 return ret;
350 }
351
352 /**
353 * Buffer management.
354 */
355
356 /**
357 * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
358 *
359 * @dev_priv: Pointer to a struct vmw_private identifying the device.
360 * @size: The requested buffer size.
361 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
362 */
363 static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
364 bool user)
365 {
366 static size_t struct_size, user_struct_size;
367 size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
368 size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
369
370 if (unlikely(struct_size == 0)) {
371 size_t backend_size = ttm_round_pot(vmw_tt_size);
372
373 struct_size = backend_size +
374 ttm_round_pot(sizeof(struct vmw_dma_buffer));
375 user_struct_size = backend_size +
376 ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
377 }
378
379 if (dev_priv->map_mode == vmw_dma_alloc_coherent)
380 page_array_size +=
381 ttm_round_pot(num_pages * sizeof(dma_addr_t));
382
383 return ((user) ? user_struct_size : struct_size) +
384 page_array_size;
385 }
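/*
 * Worked example (assuming a 64-bit machine with 4 KiB pages): a 64 KiB
 * buffer spans 16 pages, so page_array_size starts at
 * ttm_round_pot(16 * sizeof(void *)) = 128 bytes; in the
 * vmw_dma_alloc_coherent mode another ttm_round_pot(16 * sizeof(dma_addr_t))
 * = 128 bytes is added, on top of the rounded struct and TTM backend sizes.
 */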
386
387 void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
388 {
389 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
390
391 kfree(vmw_bo);
392 }
393
394 static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
395 {
396 struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
397
398 ttm_prime_object_kfree(vmw_user_bo, prime);
399 }
400
401 int vmw_dmabuf_init(struct vmw_private *dev_priv,
402 struct vmw_dma_buffer *vmw_bo,
403 size_t size, struct ttm_placement *placement,
404 bool interruptible,
405 void (*bo_free) (struct ttm_buffer_object *bo))
406 {
407 struct ttm_bo_device *bdev = &dev_priv->bdev;
408 size_t acc_size;
409 int ret;
410 bool user = (bo_free == &vmw_user_dmabuf_destroy);
411
412 BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));
413
414 acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
415 memset(vmw_bo, 0, sizeof(*vmw_bo));
416
417 INIT_LIST_HEAD(&vmw_bo->res_list);
418
419 ret = ttm_bo_init(bdev, &vmw_bo->base, size,
420 (user) ? ttm_bo_type_device :
421 ttm_bo_type_kernel, placement,
422 0, interruptible,
423 NULL, acc_size, NULL, bo_free);
424 return ret;
425 }
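/*
 * Sketch of creating a kernel-internal buffer with the initializer above
 * (error handling abbreviated; note that ttm_bo_init() invokes bo_free
 * on failure, which is why no explicit kfree() appears here):
 *
 *	struct vmw_dma_buffer *vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
 *
 *	if (!vbo)
 *		return -ENOMEM;
 *	ret = vmw_dmabuf_init(dev_priv, vbo, size, &vmw_vram_sys_placement,
 *			      true, &vmw_dmabuf_bo_free);
 *
 * vmw_resource_buf_alloc() below uses essentially this sequence for
 * backup buffers.
 */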
426
427 static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
428 {
429 struct vmw_user_dma_buffer *vmw_user_bo;
430 struct ttm_base_object *base = *p_base;
431 struct ttm_buffer_object *bo;
432
433 *p_base = NULL;
434
435 if (unlikely(base == NULL))
436 return;
437
438 vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
439 prime.base);
440 bo = &vmw_user_bo->dma.base;
441 ttm_bo_unref(&bo);
442 }
443
444 /**
445 * vmw_user_dmabuf_alloc - Allocate a user dma buffer
446 *
447 * @dev_priv: Pointer to a struct device private.
448 * @tfile: Pointer to a struct ttm_object_file on which to register the user
449 * object.
450 * @size: Size of the dma buffer.
451 * @shareable: Boolean whether the buffer is shareable with other open files.
452 * @handle: Pointer to where the handle value should be assigned.
453 * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
454 * should be assigned.
455 */
456 int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
457 struct ttm_object_file *tfile,
458 uint32_t size,
459 bool shareable,
460 uint32_t *handle,
461 struct vmw_dma_buffer **p_dma_buf)
462 {
463 struct vmw_user_dma_buffer *user_bo;
464 struct ttm_buffer_object *tmp;
465 int ret;
466
467 user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
468 if (unlikely(user_bo == NULL)) {
469 DRM_ERROR("Failed to allocate a buffer.\n");
470 return -ENOMEM;
471 }
472
473 ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
474 &vmw_vram_sys_placement, true,
475 &vmw_user_dmabuf_destroy);
476 if (unlikely(ret != 0))
477 return ret;
478
479 tmp = ttm_bo_reference(&user_bo->dma.base);
480 ret = ttm_prime_object_init(tfile,
481 size,
482 &user_bo->prime,
483 shareable,
484 ttm_buffer_type,
485 &vmw_user_dmabuf_release, NULL);
486 if (unlikely(ret != 0)) {
487 ttm_bo_unref(&tmp);
488 goto out_no_base_object;
489 }
490
491 *p_dma_buf = &user_bo->dma;
492 *handle = user_bo->prime.base.hash.key;
493
494 out_no_base_object:
495 return ret;
496 }
497
498 /**
499 * vmw_user_dmabuf_verify_access - verify access permissions on this
500 * buffer object.
501 *
502 * @bo: Pointer to the buffer object being accessed
503 * @tfile: Identifying the caller.
504 */
505 int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
506 struct ttm_object_file *tfile)
507 {
508 struct vmw_user_dma_buffer *vmw_user_bo;
509
510 if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
511 return -EPERM;
512
513 vmw_user_bo = vmw_user_dma_buffer(bo);
514 return (vmw_user_bo->prime.base.tfile == tfile ||
515 vmw_user_bo->prime.base.shareable) ? 0 : -EPERM;
516 }
517
518 int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
519 struct drm_file *file_priv)
520 {
521 struct vmw_private *dev_priv = vmw_priv(dev);
522 union drm_vmw_alloc_dmabuf_arg *arg =
523 (union drm_vmw_alloc_dmabuf_arg *)data;
524 struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
525 struct drm_vmw_dmabuf_rep *rep = &arg->rep;
526 struct vmw_dma_buffer *dma_buf;
527 uint32_t handle;
528 struct vmw_master *vmaster = vmw_master(file_priv->master);
529 int ret;
530
531 ret = ttm_read_lock(&vmaster->lock, true);
532 if (unlikely(ret != 0))
533 return ret;
534
535 ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
536 req->size, false, &handle, &dma_buf);
537 if (unlikely(ret != 0))
538 goto out_no_dmabuf;
539
540 rep->handle = handle;
541 rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
542 rep->cur_gmr_id = handle;
543 rep->cur_gmr_offset = 0;
544
545 vmw_dmabuf_unreference(&dma_buf);
546
547 out_no_dmabuf:
548 ttm_read_unlock(&vmaster->lock);
549
550 return ret;
551 }
552
553 int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
554 struct drm_file *file_priv)
555 {
556 struct drm_vmw_unref_dmabuf_arg *arg =
557 (struct drm_vmw_unref_dmabuf_arg *)data;
558
559 return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
560 arg->handle,
561 TTM_REF_USAGE);
562 }
563
564 int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
565 uint32_t handle, struct vmw_dma_buffer **out)
566 {
567 struct vmw_user_dma_buffer *vmw_user_bo;
568 struct ttm_base_object *base;
569
570 base = ttm_base_object_lookup(tfile, handle);
571 if (unlikely(base == NULL)) {
572 printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
573 (unsigned long)handle);
574 return -ESRCH;
575 }
576
577 if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
578 ttm_base_object_unref(&base);
579 printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
580 (unsigned long)handle);
581 return -EINVAL;
582 }
583
584 vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
585 prime.base);
586 (void)ttm_bo_reference(&vmw_user_bo->dma.base);
587 ttm_base_object_unref(&base);
588 *out = &vmw_user_bo->dma;
589
590 return 0;
591 }
592
593 int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
594 struct vmw_dma_buffer *dma_buf)
595 {
596 struct vmw_user_dma_buffer *user_bo;
597
598 if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
599 return -EINVAL;
600
601 user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
602 return ttm_ref_object_add(tfile, &user_bo->prime.base,
603 TTM_REF_USAGE, NULL);
604 }
605
606 /*
607 * Stream management
608 */
609
610 static void vmw_stream_destroy(struct vmw_resource *res)
611 {
612 struct vmw_private *dev_priv = res->dev_priv;
613 struct vmw_stream *stream;
614 int ret;
615
616 DRM_INFO("%s: unref\n", __func__);
617 stream = container_of(res, struct vmw_stream, res);
618
619 ret = vmw_overlay_unref(dev_priv, stream->stream_id);
620 WARN_ON(ret != 0);
621 }
622
623 static int vmw_stream_init(struct vmw_private *dev_priv,
624 struct vmw_stream *stream,
625 void (*res_free) (struct vmw_resource *res))
626 {
627 struct vmw_resource *res = &stream->res;
628 int ret;
629
630 ret = vmw_resource_init(dev_priv, res, false, res_free,
631 &vmw_stream_func);
632
633 if (unlikely(ret != 0)) {
634 if (res_free == NULL)
635 kfree(stream);
636 else
637 res_free(&stream->res);
638 return ret;
639 }
640
641 ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
642 if (ret) {
643 vmw_resource_unreference(&res);
644 return ret;
645 }
646
647 DRM_INFO("%s: claimed\n", __func__);
648
649 vmw_resource_activate(&stream->res, vmw_stream_destroy);
650 return 0;
651 }
652
653 static void vmw_user_stream_free(struct vmw_resource *res)
654 {
655 struct vmw_user_stream *stream =
656 container_of(res, struct vmw_user_stream, stream.res);
657 struct vmw_private *dev_priv = res->dev_priv;
658
659 ttm_base_object_kfree(stream, base);
660 ttm_mem_global_free(vmw_mem_glob(dev_priv),
661 vmw_user_stream_size);
662 }
663
664 /**
665 * This function is called when user space has no more references on the
666 * base object. It releases the base-object's reference on the resource object.
667 */
668
669 static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
670 {
671 struct ttm_base_object *base = *p_base;
672 struct vmw_user_stream *stream =
673 container_of(base, struct vmw_user_stream, base);
674 struct vmw_resource *res = &stream->stream.res;
675
676 *p_base = NULL;
677 vmw_resource_unreference(&res);
678 }
679
680 int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
681 struct drm_file *file_priv)
682 {
683 struct vmw_private *dev_priv = vmw_priv(dev);
684 struct vmw_resource *res;
685 struct vmw_user_stream *stream;
686 struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
687 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
688 struct idr *idr = &dev_priv->res_idr[vmw_res_stream];
689 int ret = 0;
690
691
692 res = vmw_resource_lookup(dev_priv, idr, arg->stream_id);
693 if (unlikely(res == NULL))
694 return -EINVAL;
695
696 if (res->res_free != &vmw_user_stream_free) {
697 ret = -EINVAL;
698 goto out;
699 }
700
701 stream = container_of(res, struct vmw_user_stream, stream.res);
702 if (stream->base.tfile != tfile) {
703 ret = -EINVAL;
704 goto out;
705 }
706
707 ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
708 out:
709 vmw_resource_unreference(&res);
710 return ret;
711 }
712
713 int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
714 struct drm_file *file_priv)
715 {
716 struct vmw_private *dev_priv = vmw_priv(dev);
717 struct vmw_user_stream *stream;
718 struct vmw_resource *res;
719 struct vmw_resource *tmp;
720 struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
721 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
722 struct vmw_master *vmaster = vmw_master(file_priv->master);
723 int ret;
724
725 /*
726 * Approximate the idr memory usage with 128 bytes. It will be limited
727 * by the maximum number of streams anyway.
728 */
729
730 if (unlikely(vmw_user_stream_size == 0))
731 vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;
732
733 ret = ttm_read_lock(&vmaster->lock, true);
734 if (unlikely(ret != 0))
735 return ret;
736
737 ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
738 vmw_user_stream_size,
739 false, true);
740 if (unlikely(ret != 0)) {
741 if (ret != -ERESTARTSYS)
742 DRM_ERROR("Out of graphics memory for stream"
743 " creation.\n");
744 goto out_unlock;
745 }
746
747
748 stream = kmalloc(sizeof(*stream), GFP_KERNEL);
749 if (unlikely(stream == NULL)) {
750 ttm_mem_global_free(vmw_mem_glob(dev_priv),
751 vmw_user_stream_size);
752 ret = -ENOMEM;
753 goto out_unlock;
754 }
755
756 res = &stream->stream.res;
757 stream->base.shareable = false;
758 stream->base.tfile = NULL;
759
760 /*
761 * From here on, the destructor takes over resource freeing.
762 */
763
764 ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
765 if (unlikely(ret != 0))
766 goto out_unlock;
767
768 tmp = vmw_resource_reference(res);
769 ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
770 &vmw_user_stream_base_release, NULL);
771
772 if (unlikely(ret != 0)) {
773 vmw_resource_unreference(&tmp);
774 goto out_err;
775 }
776
777 arg->stream_id = res->id;
778 out_err:
779 vmw_resource_unreference(&res);
780 out_unlock:
781 ttm_read_unlock(&vmaster->lock);
782 return ret;
783 }
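/*
 * Note the accounting pattern in the ioctl above (sketch): device-global
 * memory is reserved before the allocation and handed back on any
 * failure path, while after a successful vmw_stream_init() the
 * destructor owns the accounted memory:
 *
 *	ret = ttm_mem_global_alloc(glob, size, false, true);
 *	if (ret)
 *		return ret;
 *	obj = kmalloc(size, GFP_KERNEL);
 *	if (!obj) {
 *		ttm_mem_global_free(glob, size);
 *		return -ENOMEM;
 *	}
 */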
784
785 int vmw_user_stream_lookup(struct vmw_private *dev_priv,
786 struct ttm_object_file *tfile,
787 uint32_t *inout_id, struct vmw_resource **out)
788 {
789 struct vmw_user_stream *stream;
790 struct vmw_resource *res;
791 int ret;
792
793 res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream],
794 *inout_id);
795 if (unlikely(res == NULL))
796 return -EINVAL;
797
798 if (res->res_free != &vmw_user_stream_free) {
799 ret = -EINVAL;
800 goto err_ref;
801 }
802
803 stream = container_of(res, struct vmw_user_stream, stream.res);
804 if (stream->base.tfile != tfile) {
805 ret = -EPERM;
806 goto err_ref;
807 }
808
809 *inout_id = stream->stream.stream_id;
810 *out = res;
811 return 0;
812 err_ref:
813 vmw_resource_unreference(&res);
814 return ret;
815 }
816
817
818 /**
819 * vmw_dumb_create - Create a dumb kms buffer
820 *
821 * @file_priv: Pointer to a struct drm_file identifying the caller.
822 * @dev: Pointer to the drm device.
823 * @args: Pointer to a struct drm_mode_create_dumb structure
824 *
825 * This is a driver callback for the core drm create_dumb functionality.
826 * Note that this is very similar to the vmw_dmabuf_alloc ioctl, except
827 * that the arguments have a different format.
828 */
829 int vmw_dumb_create(struct drm_file *file_priv,
830 struct drm_device *dev,
831 struct drm_mode_create_dumb *args)
832 {
833 struct vmw_private *dev_priv = vmw_priv(dev);
834 struct vmw_master *vmaster = vmw_master(file_priv->master);
835 struct vmw_dma_buffer *dma_buf;
836 int ret;
837
838 args->pitch = args->width * ((args->bpp + 7) / 8);
839 args->size = args->pitch * args->height;
840
841 ret = ttm_read_lock(&vmaster->lock, true);
842 if (unlikely(ret != 0))
843 return ret;
844
845 ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
846 args->size, false, &args->handle,
847 &dma_buf);
848 if (unlikely(ret != 0))
849 goto out_no_dmabuf;
850
851 vmw_dmabuf_unreference(&dma_buf);
852 out_no_dmabuf:
853 ttm_read_unlock(&vmaster->lock);
854 return ret;
855 }
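/*
 * For example, a 1024x768 dumb buffer at 32 bpp gets
 * pitch = 1024 * ((32 + 7) / 8) = 4096 bytes and
 * size = 4096 * 768 = 3145728 bytes (3 MiB).
 */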
856
857 /**
858 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
859 *
860 * @file_priv: Pointer to a struct drm_file identifying the caller.
861 * @dev: Pointer to the drm device.
862 * @handle: Handle identifying the dumb buffer.
863 * @offset: The address space offset returned.
864 *
865 * This is a driver callback for the core drm dumb_map_offset functionality.
866 */
867 int vmw_dumb_map_offset(struct drm_file *file_priv,
868 struct drm_device *dev, uint32_t handle,
869 uint64_t *offset)
870 {
871 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
872 struct vmw_dma_buffer *out_buf;
873 int ret;
874
875 ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf);
876 if (ret != 0)
877 return -EINVAL;
878
879 *offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
880 vmw_dmabuf_unreference(&out_buf);
881 return 0;
882 }
883
884 /**
885 * vmw_dumb_destroy - Destroy a dumb buffer
886 *
887 * @file_priv: Pointer to a struct drm_file identifying the caller.
888 * @dev: Pointer to the drm device.
889 * @handle: Handle identifying the dumb buffer.
890 *
891 * This is a driver callback for the core drm dumb_destroy functionality.
892 */
893 int vmw_dumb_destroy(struct drm_file *file_priv,
894 struct drm_device *dev,
895 uint32_t handle)
896 {
897 return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
898 handle, TTM_REF_USAGE);
899 }
900
901 /**
902 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
903 *
904 * @res: The resource for which to allocate a backup buffer.
905 * @interruptible: Whether any sleeps during allocation should be
906 * performed while interruptible.
907 */
908 static int vmw_resource_buf_alloc(struct vmw_resource *res,
909 bool interruptible)
910 {
911 unsigned long size =
912 (res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
913 struct vmw_dma_buffer *backup;
914 int ret;
915
916 if (likely(res->backup)) {
917 BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
918 return 0;
919 }
920
921 backup = kzalloc(sizeof(*backup), GFP_KERNEL);
922 if (unlikely(backup == NULL))
923 return -ENOMEM;
924
925 ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
926 res->func->backup_placement,
927 interruptible,
928 &vmw_dmabuf_bo_free);
929 if (unlikely(ret != 0))
930 goto out_no_dmabuf;
931
932 res->backup = backup;
933
934 out_no_dmabuf:
935 return ret;
936 }
937
938 /**
939 * vmw_resource_do_validate - Make a resource up-to-date and visible
940 * to the device.
941 *
942 * @res: The resource to make visible to the device.
943 * @val_buf: Information about a buffer possibly
944 * containing backup data if a bind operation is needed.
945 *
946 * On hardware resource shortage, this function returns -EBUSY and
947 * should be retried once resources have been freed up.
948 */
949 static int vmw_resource_do_validate(struct vmw_resource *res,
950 struct ttm_validate_buffer *val_buf)
951 {
952 int ret = 0;
953 const struct vmw_res_func *func = res->func;
954
955 if (unlikely(res->id == -1)) {
956 ret = func->create(res);
957 if (unlikely(ret != 0))
958 return ret;
959 }
960
961 if (func->bind &&
962 ((func->needs_backup && list_empty(&res->mob_head) &&
963 val_buf->bo != NULL) ||
964 (!func->needs_backup && val_buf->bo != NULL))) {
965 ret = func->bind(res, val_buf);
966 if (unlikely(ret != 0))
967 goto out_bind_failed;
968 if (func->needs_backup)
969 list_add_tail(&res->mob_head, &res->backup->res_list);
970 }
971
972 /*
973 * Only do this on write operations, and move to
974 * vmw_resource_unreserve if it can be called after
975 * backup buffers have been unreserved. Otherwise
976 * sort out locking.
977 */
978 res->res_dirty = true;
979
980 return 0;
981
982 out_bind_failed:
983 func->destroy(res);
984
985 return ret;
986 }
987
988 /**
989 * vmw_resource_unreserve - Unreserve a resource previously reserved for
990 * command submission.
991 *
992 * @res: Pointer to the struct vmw_resource to unreserve.
993 * @new_backup: Pointer to new backup buffer if command submission
994 * switched.
995 * @new_backup_offset: New backup offset if @new_backup is !NULL.
996 *
997 * Currently unreserving a resource means putting it back on the device's
998 * resource lru list, so that it can be evicted if necessary.
999 */
1000 void vmw_resource_unreserve(struct vmw_resource *res,
1001 struct vmw_dma_buffer *new_backup,
1002 unsigned long new_backup_offset)
1003 {
1004 struct vmw_private *dev_priv = res->dev_priv;
1005
1006 if (!list_empty(&res->lru_head))
1007 return;
1008
1009 if (new_backup && new_backup != res->backup) {
1010
1011 if (res->backup) {
1012 lockdep_assert_held(&res->backup->base.resv->lock.base);
1013 list_del_init(&res->mob_head);
1014 vmw_dmabuf_unreference(&res->backup);
1015 }
1016
1017 res->backup = vmw_dmabuf_reference(new_backup);
1018 lockdep_assert_held(&new_backup->base.resv->lock.base);
1019 list_add_tail(&res->mob_head, &new_backup->res_list);
1020 }
1021 if (new_backup)
1022 res->backup_offset = new_backup_offset;
1023
1024 if (!res->func->may_evict || res->id == -1)
1025 return;
1026
1027 write_lock(&dev_priv->resource_lock);
1028 list_add_tail(&res->lru_head,
1029 &res->dev_priv->res_lru[res->func->res_type]);
1030 write_unlock(&dev_priv->resource_lock);
1031 }
1032
1033 /**
1034 * vmw_resource_check_buffer - Check whether a backup buffer is needed
1035 * for a resource and in that case, allocate
1036 * one, reserve and validate it.
1037 *
1038 * @res: The resource for which to allocate a backup buffer.
1039 * @interruptible: Whether any sleeps during allocation should be
1040 * performed while interruptible.
1041 * @val_buf: On successful return contains data about the
1042 * reserved and validated backup buffer.
1043 */
1044 static int
1045 vmw_resource_check_buffer(struct vmw_resource *res,
1046 bool interruptible,
1047 struct ttm_validate_buffer *val_buf)
1048 {
1049 struct list_head val_list;
1050 bool backup_dirty = false;
1051 int ret;
1052
1053 if (unlikely(res->backup == NULL)) {
1054 ret = vmw_resource_buf_alloc(res, interruptible);
1055 if (unlikely(ret != 0))
1056 return ret;
1057 }
1058
1059 INIT_LIST_HEAD(&val_list);
1060 val_buf->bo = ttm_bo_reference(&res->backup->base);
1061 list_add_tail(&val_buf->head, &val_list);
1062 ret = ttm_eu_reserve_buffers(NULL, &val_list);
1063 if (unlikely(ret != 0))
1064 goto out_no_reserve;
1065
1066 if (res->func->needs_backup && list_empty(&res->mob_head))
1067 return 0;
1068
1069 backup_dirty = res->backup_dirty;
1070 ret = ttm_bo_validate(&res->backup->base,
1071 res->func->backup_placement,
1072 true, false);
1073
1074 if (unlikely(ret != 0))
1075 goto out_no_validate;
1076
1077 return 0;
1078
1079 out_no_validate:
1080 ttm_eu_backoff_reservation(NULL, &val_list);
1081 out_no_reserve:
1082 ttm_bo_unref(&val_buf->bo);
1083 if (backup_dirty)
1084 vmw_dmabuf_unreference(&res->backup);
1085
1086 return ret;
1087 }
1088
1089 /**
1090 * vmw_resource_reserve - Reserve a resource for command submission
1091 *
1092 * @res: The resource to reserve.
1093 *
1094 * This function takes the resource off the LRU list and makes sure
1095 * a backup buffer is present for guest-backed resources. However,
1096 * the buffer may not be bound to the resource at this point.
1097 *
1098 */
1099 int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
1100 {
1101 struct vmw_private *dev_priv = res->dev_priv;
1102 int ret;
1103
1104 write_lock(&dev_priv->resource_lock);
1105 list_del_init(&res->lru_head);
1106 write_unlock(&dev_priv->resource_lock);
1107
1108 if (res->func->needs_backup && res->backup == NULL &&
1109 !no_backup) {
1110 ret = vmw_resource_buf_alloc(res, true);
1111 if (unlikely(ret != 0))
1112 return ret;
1113 }
1114
1115 return 0;
1116 }
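/*
 * Reservation is one step of the lifecycle used at command submission
 * time. Roughly (sketch; buffer reservation, fencing and error handling
 * omitted — the real flow goes through the execbuf code):
 *
 *	ret = vmw_resource_reserve(res, false);
 *	ret = vmw_resource_validate(res);
 *	... submit commands referencing res ...
 *	vmw_resource_unreserve(res, NULL, 0);
 *
 * vmw_resource_reserve() takes the resource off the LRU and makes sure a
 * backup buffer exists; vmw_resource_unreserve() puts it back so it can
 * be evicted again.
 */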
1117
1118 /**
1119 * vmw_resource_backoff_reservation - Unreserve and unreference a
1120 * backup buffer
1121 *
1122 * @val_buf: Backup buffer information.
1123 */
1124 static void
1125 vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
1126 {
1127 struct list_head val_list;
1128
1129 if (likely(val_buf->bo == NULL))
1130 return;
1131
1132 INIT_LIST_HEAD(&val_list);
1133 list_add_tail(&val_buf->head, &val_list);
1134 ttm_eu_backoff_reservation(NULL, &val_list);
1135 ttm_bo_unref(&val_buf->bo);
1136 }
1137
1138 /**
1139 * vmw_resource_do_evict - Evict a resource, and transfer its data
1140 * to a backup buffer.
1141 *
1142 * @res: The resource to evict.
1143 * @interruptible: Whether to wait interruptible.
1144 */
1145 int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
1146 {
1147 struct ttm_validate_buffer val_buf;
1148 const struct vmw_res_func *func = res->func;
1149 int ret;
1150
1151 BUG_ON(!func->may_evict);
1152
1153 val_buf.bo = NULL;
1154 ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
1155 if (unlikely(ret != 0))
1156 return ret;
1157
1158 if (unlikely(func->unbind != NULL &&
1159 (!func->needs_backup || !list_empty(&res->mob_head)))) {
1160 ret = func->unbind(res, res->res_dirty, &val_buf);
1161 if (unlikely(ret != 0))
1162 goto out_no_unbind;
1163 list_del_init(&res->mob_head);
1164 }
1165 ret = func->destroy(res);
1166 res->backup_dirty = true;
1167 res->res_dirty = false;
1168 out_no_unbind:
1169 vmw_resource_backoff_reservation(&val_buf);
1170
1171 return ret;
1172 }
1173
1174
1175 /**
1176 * vmw_resource_validate - Make a resource up-to-date and visible
1177 * to the device.
1178 *
1179 * @res: The resource to make visible to the device.
1180 *
1181 * On successful return, any backup DMA buffer pointed to by @res->backup will
1182 * be reserved and validated.
1183 * On hardware resource shortage, this function will repeatedly evict
1184 * resources of the same type until the validation succeeds.
1185 */
1186 int vmw_resource_validate(struct vmw_resource *res)
1187 {
1188 int ret;
1189 struct vmw_resource *evict_res;
1190 struct vmw_private *dev_priv = res->dev_priv;
1191 struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
1192 struct ttm_validate_buffer val_buf;
1193 unsigned err_count = 0;
1194
1195 if (likely(!res->func->may_evict))
1196 return 0;
1197
1198 val_buf.bo = NULL;
1199 if (res->backup)
1200 val_buf.bo = &res->backup->base;
1201 do {
1202 ret = vmw_resource_do_validate(res, &val_buf);
1203 if (likely(ret != -EBUSY))
1204 break;
1205
1206 write_lock(&dev_priv->resource_lock);
1207 if (list_empty(lru_list) || !res->func->may_evict) {
1208 DRM_ERROR("Out of device resources "
1209 "for %s.\n", res->func->type_name);
1210 ret = -EBUSY;
1211 write_unlock(&dev_priv->resource_lock);
1212 break;
1213 }
1214
1215 evict_res = vmw_resource_reference
1216 (list_first_entry(lru_list, struct vmw_resource,
1217 lru_head));
1218 list_del_init(&evict_res->lru_head);
1219
1220 write_unlock(&dev_priv->resource_lock);
1221
1222 ret = vmw_resource_do_evict(evict_res, true);
1223 if (unlikely(ret != 0)) {
1224 write_lock(&dev_priv->resource_lock);
1225 list_add_tail(&evict_res->lru_head, lru_list);
1226 write_unlock(&dev_priv->resource_lock);
1227 if (ret == -ERESTARTSYS ||
1228 ++err_count > VMW_RES_EVICT_ERR_COUNT) {
1229 vmw_resource_unreference(&evict_res);
1230 goto out_no_validate;
1231 }
1232 }
1233
1234 vmw_resource_unreference(&evict_res);
1235 } while (1);
1236
1237 if (unlikely(ret != 0))
1238 goto out_no_validate;
1239 else if (!res->func->needs_backup && res->backup) {
1240 list_del_init(&res->mob_head);
1241 vmw_dmabuf_unreference(&res->backup);
1242 }
1243
1244 return 0;
1245
1246 out_no_validate:
1247 return ret;
1248 }
1249
1250 /**
1251 * vmw_fence_single_bo - Utility function to fence a single TTM buffer
1252 * object without unreserving it.
1253 *
1254 * @bo: Pointer to the struct ttm_buffer_object to fence.
1255 * @fence: Pointer to the fence. If NULL, this function will
1256 * insert a fence into the command stream.
1257 *
1258 * Contrary to the ttm_eu version of this function, it takes only
1259 * a single buffer object instead of a list, and it also doesn't
1260 * unreserve the buffer object, which needs to be done separately.
1261 */
1262 void vmw_fence_single_bo(struct ttm_buffer_object *bo,
1263 struct vmw_fence_obj *fence)
1264 {
1265 struct ttm_bo_device *bdev = bo->bdev;
1266 struct ttm_bo_driver *driver = bdev->driver;
1267 struct vmw_fence_obj *old_fence_obj;
1268 struct vmw_private *dev_priv =
1269 container_of(bdev, struct vmw_private, bdev);
1270
1271 if (fence == NULL)
1272 vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
1273 else
1274 driver->sync_obj_ref(fence);
1275
1276 spin_lock(&bdev->fence_lock);
1277
1278 old_fence_obj = bo->sync_obj;
1279 bo->sync_obj = fence;
1280
1281 spin_unlock(&bdev->fence_lock);
1282
1283 if (old_fence_obj)
1284 vmw_fence_obj_unreference(&old_fence_obj);
1285 }
1286
1287 /**
1288 * vmw_resource_move_notify - TTM move_notify_callback
1289 *
1290 * @bo: The TTM buffer object about to move.
1291 * @mem: The struct ttm_mem_reg indicating to what memory
1292 * region the move is taking place.
1293 *
1294 * For now does nothing.
1295 */
1296 void vmw_resource_move_notify(struct ttm_buffer_object *bo,
1297 struct ttm_mem_reg *mem)
1298 {
1299 }
1300
1301 /**
1302 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
1303 *
1304 * @res: The resource being queried.
1305 */
1306 bool vmw_resource_needs_backup(const struct vmw_resource *res)
1307 {
1308 return res->func->needs_backup;
1309 }
1310
1311 /**
1312 * vmw_resource_evict_type - Evict all resources of a specific type
1313 *
1314 * @dev_priv: Pointer to a device private struct
1315 * @type: The resource type to evict
1316 *
1317 * To avoid thrashing or starvation, or as part of the hibernation sequence,
1318 * try to evict all evictable resources of a specific type.
1319 */
1320 static void vmw_resource_evict_type(struct vmw_private *dev_priv,
1321 enum vmw_res_type type)
1322 {
1323 struct list_head *lru_list = &dev_priv->res_lru[type];
1324 struct vmw_resource *evict_res;
1325 unsigned err_count = 0;
1326 int ret;
1327
1328 do {
1329 write_lock(&dev_priv->resource_lock);
1330
1331 if (list_empty(lru_list))
1332 goto out_unlock;
1333
1334 evict_res = vmw_resource_reference(
1335 list_first_entry(lru_list, struct vmw_resource,
1336 lru_head));
1337 list_del_init(&evict_res->lru_head);
1338 write_unlock(&dev_priv->resource_lock);
1339
1340 ret = vmw_resource_do_evict(evict_res, false);
1341 if (unlikely(ret != 0)) {
1342 write_lock(&dev_priv->resource_lock);
1343 list_add_tail(&evict_res->lru_head, lru_list);
1344 write_unlock(&dev_priv->resource_lock);
1345 if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
1346 vmw_resource_unreference(&evict_res);
1347 return;
1348 }
1349 }
1350
1351 vmw_resource_unreference(&evict_res);
1352 } while (1);
1353
1354 out_unlock:
1355 write_unlock(&dev_priv->resource_lock);
1356 }
1357
1358 /**
1359 * vmw_resource_evict_all - Evict all evictable resources
1360 *
1361 * @dev_priv: Pointer to a device private struct
1362 *
1363 * To avoid thrashing or starvation, or as part of the hibernation sequence,
1364 * evict all evictable resources. In particular this means that all
1365 * guest-backed resources that are registered with the device are
1366 * evicted and the OTable becomes clean.
1367 */
1368 void vmw_resource_evict_all(struct vmw_private *dev_priv)
1369 {
1370 enum vmw_res_type type;
1371
1372 mutex_lock(&dev_priv->cmdbuf_mutex);
1373
1374 for (type = 0; type < vmw_res_max; ++type)
1375 vmw_resource_evict_type(dev_priv, type);
1376
1377 mutex_unlock(&dev_priv->cmdbuf_mutex);
1378 }