/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include <drm/ttm/ttm_object.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h>
#include "vmwgfx_resource_priv.h"

#define VMW_RES_EVICT_ERR_COUNT 10
struct vmw_user_dma_buffer {
	struct ttm_prime_object prime;
	struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
	uint32_t handle;
	uint64_t map_handle;
};

struct vmw_stream {
	struct vmw_resource res;
	uint32_t stream_id;
};

struct vmw_user_stream {
	struct ttm_base_object base;
	struct vmw_stream stream;
};

static uint64_t vmw_user_stream_size;
static const struct vmw_res_func vmw_stream_func = {
	.res_type = vmw_res_stream,
	.needs_backup = false,
	.may_evict = false,
	.type_name = "video streams",
	.backup_placement = NULL,
	.create = NULL,
	.destroy = NULL,
	.bind = NULL,
	.unbind = NULL
};
static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}
struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
	return kref_get_unless_zero(&res->kref) ? res : NULL;
}
/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	write_lock(&dev_priv->resource_lock);
	if (res->id != -1)
		idr_remove(idr, res->id);
	res->id = -1;
	write_unlock(&dev_priv->resource_lock);
}
static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;
	int id;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	res->avail = false;
	list_del_init(&res->lru_head);
	write_unlock(&dev_priv->resource_lock);
	if (res->backup) {
		struct ttm_buffer_object *bo = &res->backup->base;

		ttm_bo_reserve(bo, false, false, false, NULL);
		if (!list_empty(&res->mob_head) &&
		    res->func->unbind != NULL) {
			struct ttm_validate_buffer val_buf;

			val_buf.bo = bo;
			res->func->unbind(res, false, &val_buf);
		}
		res->backup_dirty = false;
		list_del_init(&res->mob_head);
		ttm_bo_unreserve(bo);
		vmw_dmabuf_unreference(&res->backup);
	}

	if (likely(res->hw_destroy != NULL)) {
		res->hw_destroy(res);
		mutex_lock(&dev_priv->binding_mutex);
		vmw_context_binding_res_list_kill(&res->binding_head);
		mutex_unlock(&dev_priv->binding_mutex);
	}

	id = res->id;
	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	write_lock(&dev_priv->resource_lock);

	if (id != -1)
		idr_remove(idr, id);
}
void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;
	struct vmw_private *dev_priv = res->dev_priv;

	*p_res = NULL;
	write_lock(&dev_priv->resource_lock);
	kref_put(&res->kref, vmw_resource_release);
	write_unlock(&dev_priv->resource_lock);
}
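
/*
 * Illustrative sketch (not part of the original file): callers typically
 * pair vmw_resource_reference() with vmw_resource_unreference() around
 * any use of a looked-up resource. The helper below is hypothetical:
 *
 *	static void example_use_resource(struct vmw_resource *res)
 *	{
 *		struct vmw_resource *tmp = vmw_resource_reference(res);
 *
 *		(use tmp; the extra reference keeps it alive)
 *		vmw_resource_unreference(&tmp);	(tmp is set to NULL)
 *	}
 */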
/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource from the resource manager, and set
 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	BUG_ON(res->id != -1);

	idr_preload(GFP_KERNEL);
	write_lock(&dev_priv->resource_lock);

	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
	if (ret >= 0)
		res->id = ret;

	write_unlock(&dev_priv->resource_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}
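
/*
 * Note on the allocation pattern above (added commentary): idr_preload()
 * pre-allocates idr nodes in a context where GFP_KERNEL sleeping is still
 * allowed, so that the subsequent idr_alloc() can run under the
 * resource_lock with GFP_NOWAIT and still reliably find memory.
 * idr_preload_end() re-enables preemption and must follow on the same CPU.
 */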
/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv: Pointer to a device private struct.
 * @res:      The struct vmw_resource to initialize.
 * @delay_id: Boolean whether to defer device id allocation until
 *            the first validation.
 * @res_free: Resource destructor.
 * @func:     Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
		      bool delay_id,
		      void (*res_free) (struct vmw_resource *res),
		      const struct vmw_res_func *func)
{
	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->avail = false;
	res->dev_priv = dev_priv;
	res->func = func;
	INIT_LIST_HEAD(&res->lru_head);
	INIT_LIST_HEAD(&res->mob_head);
	INIT_LIST_HEAD(&res->binding_head);
	res->id = -1;
	res->backup = NULL;
	res->backup_offset = 0;
	res->backup_dirty = false;
	res->res_dirty = false;
	if (delay_id)
		return 0;
	else
		return vmw_resource_alloc_id(res);
}
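
/*
 * Illustrative sketch (assumption, not in the original file): a resource
 * constructor typically pairs vmw_resource_init() with
 * vmw_resource_activate() once the hardware knows about the resource:
 *
 *	ret = vmw_resource_init(dev_priv, res, false, my_res_free,
 *				&my_res_func);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	(make the hardware aware of the resource)
 *	vmw_resource_activate(res, my_hw_destroy);
 *
 * my_res_free, my_res_func and my_hw_destroy are hypothetical names;
 * vmw_stream_init() further down is a real in-file user of this pattern.
 */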
/**
 * vmw_resource_activate
 *
 * @res:        Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activate basically means that the function vmw_resource_lookup will
 * find it.
 */
void vmw_resource_activate(struct vmw_resource *res,
			   void (*hw_destroy) (struct vmw_resource *))
{
	struct vmw_private *dev_priv = res->dev_priv;

	write_lock(&dev_priv->resource_lock);
	res->avail = true;
	res->hw_destroy = hw_destroy;
	write_unlock(&dev_priv->resource_lock);
}
struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
					 struct idr *idr, int id)
{
	struct vmw_resource *res;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(idr, id);
	if (res && res->avail)
		kref_get(&res->kref);
	else
		res = NULL;
	read_unlock(&dev_priv->resource_lock);

	if (unlikely(res == NULL))
		return NULL;

	return res;
}
/**
 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv:  Pointer to a device private struct
 * @tfile:     Pointer to a struct ttm_object_file identifying the caller
 * @handle:    The TTM user-space handle
 * @converter: Pointer to an object describing the resource type
 * @p_res:     On successful return the location pointed to will contain
 *             a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
				    struct ttm_object_file *tfile,
				    uint32_t handle,
				    const struct vmw_user_resource_conv
				    *converter,
				    struct vmw_resource **p_res)
{
	struct ttm_base_object *base;
	struct vmw_resource *res;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(ttm_base_object_type(base) != converter->object_type))
		goto out_bad_resource;

	res = converter->base_obj_to_res(base);

	read_lock(&dev_priv->resource_lock);
	if (!res->avail || res->res_free != converter->res_free) {
		read_unlock(&dev_priv->resource_lock);
		goto out_bad_resource;
	}

	kref_get(&res->kref);
	read_unlock(&dev_priv->resource_lock);

	*p_res = res;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}
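
/*
 * Illustrative sketch (assumption, not in the original file): a caller
 * would typically use the lookup like this, dropping the reference when
 * done with the resource:
 *
 *	struct vmw_resource *res;
 *
 *	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
 *					      user_surface_converter, &res);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	(use the surface resource)
 *	vmw_resource_unreference(&res);
 */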
/**
 * Helper function that looks up either a surface or a dmabuf.
 *
 * The pointers pointed at by @out_surf and @out_buf need to be NULL.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t handle,
			   struct vmw_surface **out_surf,
			   struct vmw_dma_buffer **out_buf)
{
	struct vmw_resource *res;
	int ret;

	BUG_ON(*out_surf || *out_buf);

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
					      user_surface_converter,
					      &res);
	if (!ret) {
		*out_surf = vmw_res_to_srf(res);
		return 0;
	}

	*out_surf = NULL;
	ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
	return ret;
}
/**
 * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @size: The requested buffer size.
 * @user: Whether this is an ordinary dma buffer or a user dma buffer.
 */
static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
				  bool user)
{
	static size_t struct_size, user_struct_size;
	size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));

	if (unlikely(struct_size == 0)) {
		size_t backend_size = ttm_round_pot(vmw_tt_size);

		struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_dma_buffer));
		user_struct_size = backend_size +
			ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
	}

	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
		page_array_size +=
			ttm_round_pot(num_pages * sizeof(dma_addr_t));

	return ((user) ? user_struct_size : struct_size) +
		page_array_size;
}
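
/*
 * Worked example for the accounting above (added commentary, assuming
 * 4 KiB pages): a 64 KiB ordinary dma buffer gives num_pages = 16, so
 * page_array_size starts at ttm_round_pot(16 * sizeof(void *)) and grows
 * by ttm_round_pot(16 * sizeof(dma_addr_t)) in vmw_dma_alloc_coherent
 * mode; the fixed struct_size term is added on top.
 */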
void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	kfree(vmw_bo);
}

static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);

	ttm_prime_object_kfree(vmw_user_bo, prime);
}
int vmw_dmabuf_init(struct vmw_private *dev_priv,
		    struct vmw_dma_buffer *vmw_bo,
		    size_t size, struct ttm_placement *placement,
		    bool interruptible,
		    void (*bo_free) (struct ttm_buffer_object *bo))
{
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	size_t acc_size;
	int ret;
	bool user = (bo_free == &vmw_user_dmabuf_destroy);

	BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));

	acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
	memset(vmw_bo, 0, sizeof(*vmw_bo));

	INIT_LIST_HEAD(&vmw_bo->res_list);

	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
			  ttm_bo_type_device, placement,
			  0, interruptible,
			  NULL, acc_size, NULL, bo_free);
	return ret;
}
static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base = *p_base;
	struct ttm_buffer_object *bo;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
				   prime.base);
	bo = &vmw_user_bo->dma.base;
	ttm_bo_unref(&bo);
}
static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
					    enum ttm_ref_type ref_type)
{
	struct vmw_user_dma_buffer *user_bo;
	user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base);

	switch (ref_type) {
	case TTM_REF_SYNCCPU_WRITE:
		ttm_bo_synccpu_write_release(&user_bo->dma.base);
		break;
	default:
		BUG();
	}
}
/**
 * vmw_user_dmabuf_alloc - Allocate a user dma buffer
 *
 * @dev_priv: Pointer to a struct device private.
 * @tfile: Pointer to a struct ttm_object_file on which to register the user
 * object.
 * @size: Size of the dma buffer.
 * @shareable: Boolean whether the buffer is shareable with other open files.
 * @handle: Pointer to where the handle value should be assigned.
 * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
 * should be assigned.
 */
int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
			  struct ttm_object_file *tfile,
			  uint32_t size,
			  bool shareable,
			  uint32_t *handle,
			  struct vmw_dma_buffer **p_dma_buf)
{
	struct vmw_user_dma_buffer *user_bo;
	struct ttm_buffer_object *tmp;
	int ret;

	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
	if (unlikely(user_bo == NULL)) {
		DRM_ERROR("Failed to allocate a buffer.\n");
		return -ENOMEM;
	}

	ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
			      (dev_priv->has_mob) ?
			      &vmw_sys_placement :
			      &vmw_vram_sys_placement, true,
			      &vmw_user_dmabuf_destroy);
	if (unlikely(ret != 0))
		return ret;

	tmp = ttm_bo_reference(&user_bo->dma.base);
	ret = ttm_prime_object_init(tfile,
				    size,
				    &user_bo->prime,
				    shareable,
				    ttm_buffer_type,
				    &vmw_user_dmabuf_release,
				    &vmw_user_dmabuf_ref_obj_release);
	if (unlikely(ret != 0)) {
		ttm_bo_unref(&tmp);
		goto out_no_base_object;
	}

	*p_dma_buf = &user_bo->dma;
	*handle = user_bo->prime.base.hash.key;

out_no_base_object:
	return ret;
}
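
/*
 * Illustrative note (added commentary): on success the caller owns one
 * refcount on *p_dma_buf and must eventually drop it, while @handle stays
 * valid until user space unreferences it:
 *
 *	ret = vmw_user_dmabuf_alloc(dev_priv, tfile, size, false,
 *				    &handle, &dma_buf);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	(report handle to user space)
 *	vmw_dmabuf_unreference(&dma_buf);
 *
 * vmw_dmabuf_alloc_ioctl() below follows exactly this pattern.
 */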
/**
 * vmw_user_dmabuf_verify_access - verify access permissions on this
 * buffer object.
 *
 * @bo: Pointer to the buffer object being accessed
 * @tfile: Identifying the caller.
 */
int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
				  struct ttm_object_file *tfile)
{
	struct vmw_user_dma_buffer *vmw_user_bo;

	if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
		return -EPERM;

	vmw_user_bo = vmw_user_dma_buffer(bo);

	/* Check that the caller has opened the object. */
	if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
		return 0;

	DRM_ERROR("Could not grant buffer access.\n");
	return -EPERM;
}
/**
 * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu
 * access, idling previous GPU operations on the buffer and optionally
 * blocking it for further command submissions.
 *
 * @user_bo: Pointer to the buffer object being grabbed for CPU access
 * @tfile: Identifying the caller.
 * @flags: Flags indicating how the grab should be performed.
 *
 * A blocking grab will be automatically released when @tfile is closed.
 */
static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
					struct ttm_object_file *tfile,
					uint32_t flags)
{
	struct ttm_buffer_object *bo = &user_bo->dma.base;
	bool existed;
	int ret;

	if (flags & drm_vmw_synccpu_allow_cs) {
		bool nonblock = !!(flags & drm_vmw_synccpu_dontblock);

		ret = ttm_bo_reserve(bo, true, nonblock, false, NULL);
		if (!ret) {
			ret = ttm_bo_wait(bo, false, true, nonblock);
			ttm_bo_unreserve(bo);
		}
		return ret;
	}

	ret = ttm_bo_synccpu_write_grab
		(bo, !!(flags & drm_vmw_synccpu_dontblock));
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
				 TTM_REF_SYNCCPU_WRITE, &existed);
	if (ret != 0 || existed)
		ttm_bo_synccpu_write_release(&user_bo->dma.base);

	return ret;
}
/**
 * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access,
 * and unblock command submission on the buffer if blocked.
 *
 * @handle: Handle identifying the buffer object.
 * @tfile: Identifying the caller.
 * @flags: Flags indicating the type of release.
 */
static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
					   struct ttm_object_file *tfile,
					   uint32_t flags)
{
	if (!(flags & drm_vmw_synccpu_allow_cs))
		return ttm_ref_object_base_unref(tfile, handle,
						 TTM_REF_SYNCCPU_WRITE);

	return 0;
}
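
/*
 * Illustrative sketch (assumption, not in the original file): from user
 * space the grab/release helpers above pair up through the synccpu ioctl,
 * roughly:
 *
 *	struct drm_vmw_synccpu_arg arg = { 0 };
 *
 *	arg.handle = bo_handle;
 *	arg.op = drm_vmw_synccpu_grab;
 *	arg.flags = drm_vmw_synccpu_read | drm_vmw_synccpu_write;
 *	(submit the ioctl, access the buffer with the CPU, then)
 *	arg.op = drm_vmw_synccpu_release;
 *	(submit the ioctl again)
 *
 * bo_handle is a hypothetical name; vmw_user_dmabuf_synccpu_ioctl() below
 * dispatches these operations.
 */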
/**
 * vmw_user_dmabuf_synccpu_ioctl - ioctl function implementing the synccpu
 * functionality.
 *
 * @dev: Identifies the drm device.
 * @data: Pointer to the ioctl argument.
 * @file_priv: Identifies the caller.
 *
 * This function checks the ioctl arguments for validity and calls the
 * relevant synccpu functions.
 */
int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv)
{
	struct drm_vmw_synccpu_arg *arg =
		(struct drm_vmw_synccpu_arg *) data;
	struct vmw_dma_buffer *dma_buf;
	struct vmw_user_dma_buffer *user_bo;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
			       drm_vmw_synccpu_dontblock |
			       drm_vmw_synccpu_allow_cs)) != 0) {
		DRM_ERROR("Illegal synccpu flags.\n");
		return -EINVAL;
	}

	switch (arg->op) {
	case drm_vmw_synccpu_grab:
		ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf);
		if (unlikely(ret != 0))
			return ret;

		user_bo = container_of(dma_buf, struct vmw_user_dma_buffer,
				       dma);
		ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
		vmw_dmabuf_unreference(&dma_buf);
		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
			     ret != -EBUSY)) {
			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	case drm_vmw_synccpu_release:
		ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile,
						      arg->flags);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
				  (unsigned int) arg->handle);
			return ret;
		}
		break;
	default:
		DRM_ERROR("Invalid synccpu operation.\n");
		return -EINVAL;
	}

	return 0;
}
int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
	    (union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_dma_buffer *dma_buf;
	uint32_t handle;
	int ret;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				    req->size, false, &handle, &dma_buf);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	rep->handle = handle;
	rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
	rep->cur_gmr_id = handle;
	rep->cur_gmr_offset = 0;

	vmw_dmabuf_unreference(&dma_buf);

out_no_dmabuf:
	ttm_read_unlock(&dev_priv->reservation_sem);

	return ret;
}
int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}
int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
			   uint32_t handle, struct vmw_dma_buffer **out)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -ESRCH;
	}

	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -EINVAL;
	}

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
				   prime.base);
	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
	ttm_base_object_unref(&base);
	*out = &vmw_user_bo->dma;

	return 0;
}
int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
			      struct vmw_dma_buffer *dma_buf,
			      uint32_t *handle)
{
	struct vmw_user_dma_buffer *user_bo;

	if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
		return -EINVAL;

	user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);

	*handle = user_bo->prime.base.hash.key;
	return ttm_ref_object_add(tfile, &user_bo->prime.base,
				  TTM_REF_USAGE, NULL);
}
/*
 * Stream management
 */

static void vmw_stream_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_stream *stream;
	int ret;

	DRM_INFO("%s: unref\n", __func__);
	stream = container_of(res, struct vmw_stream, res);

	ret = vmw_overlay_unref(dev_priv, stream->stream_id);
	WARN_ON(ret != 0);
}
static int vmw_stream_init(struct vmw_private *dev_priv,
			   struct vmw_stream *stream,
			   void (*res_free) (struct vmw_resource *res))
{
	struct vmw_resource *res = &stream->res;
	int ret;

	ret = vmw_resource_init(dev_priv, res, false, res_free,
				&vmw_stream_func);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(stream);
		else
			res_free(&stream->res);
		return ret;
	}

	ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
	if (ret) {
		vmw_resource_unreference(&res);
		return ret;
	}

	DRM_INFO("%s: claimed\n", __func__);

	vmw_resource_activate(&stream->res, vmw_stream_destroy);
	return 0;
}
static void vmw_user_stream_free(struct vmw_resource *res)
{
	struct vmw_user_stream *stream =
	    container_of(res, struct vmw_user_stream, stream.res);
	struct vmw_private *dev_priv = res->dev_priv;

	ttm_base_object_kfree(stream, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_stream_size);
}
/*
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_stream *stream =
	    container_of(base, struct vmw_user_stream, base);
	struct vmw_resource *res = &stream->stream.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}
int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_stream *stream;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct idr *idr = &dev_priv->res_idr[vmw_res_stream];
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, idr, arg->stream_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto out;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EINVAL;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}
int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	/*
	 * Approximate idr memory usage with 128 bytes. It will be limited
	 * by the maximum number of streams anyway.
	 */

	if (unlikely(vmw_user_stream_size == 0))
		vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_stream_size,
				   false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for stream"
				  " creation.\n");
		goto out_unlock;
	}

	stream = kmalloc(sizeof(*stream), GFP_KERNEL);
	if (unlikely(stream == NULL)) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_stream_size);
		ret = -ENOMEM;
		goto out_unlock;
	}

	res = &stream->stream.res;
	stream->base.shareable = false;
	stream->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */

	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
	if (unlikely(ret != 0))
		goto out_unlock;

	tmp = vmw_resource_reference(res);
	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
				   &vmw_user_stream_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->stream_id = res->id;
out_err:
	vmw_resource_unreference(&res);
out_unlock:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}
int vmw_user_stream_lookup(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t *inout_id, struct vmw_resource **out)
{
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	int ret;

	res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream],
				  *inout_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto err_ref;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EPERM;
		goto err_ref;
	}

	*inout_id = stream->stream.stream_id;
	*out = res;
	return 0;
err_ref:
	vmw_resource_unreference(&res);
	return ret;
}
/**
 * vmw_dumb_create - Create a dumb kms buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @args: Pointer to a struct drm_mode_create_dumb structure
 *
 * This is a driver callback for the core drm create_dumb functionality.
 * Note that this is very similar to the vmw_dmabuf_alloc ioctl, except
 * that the arguments have a different format.
 */
int vmw_dumb_create(struct drm_file *file_priv,
		    struct drm_device *dev,
		    struct drm_mode_create_dumb *args)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_dma_buffer *dma_buf;
	int ret;

	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
				    args->size, false, &args->handle,
				    &dma_buf);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	vmw_dmabuf_unreference(&dma_buf);
out_no_dmabuf:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}
/**
 * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 * @offset: The address space offset returned.
 *
 * This is a driver callback for the core drm dumb_map_offset functionality.
 */
int vmw_dumb_map_offset(struct drm_file *file_priv,
			struct drm_device *dev, uint32_t handle,
			uint64_t *offset)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_dma_buffer *out_buf;
	int ret;

	ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf);
	if (ret != 0)
		return -EINVAL;

	*offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
	vmw_dmabuf_unreference(&out_buf);
	return 0;
}
/**
 * vmw_dumb_destroy - Destroy a dumb buffer
 *
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @dev: Pointer to the drm device.
 * @handle: Handle identifying the dumb buffer.
 *
 * This is a driver callback for the core drm dumb_destroy functionality.
 */
int vmw_dumb_destroy(struct drm_file *file_priv,
		     struct drm_device *dev,
		     uint32_t handle)
{
	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 handle, TTM_REF_USAGE);
}
/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
				  bool interruptible)
{
	unsigned long size =
		(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
	struct vmw_dma_buffer *backup;
	int ret;

	if (likely(res->backup)) {
		BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
		return 0;
	}

	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
	if (unlikely(backup == NULL))
		return -ENOMEM;

	ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
			      res->func->backup_placement,
			      interruptible,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	res->backup = backup;

out_no_dmabuf:
	return ret;
}
/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 *                            to the device.
 *
 * @res:            The resource to make visible to the device.
 * @val_buf:        Information about a buffer possibly
 *                  containing backup data if a bind operation is needed.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
				    struct ttm_validate_buffer *val_buf)
{
	int ret = 0;
	const struct vmw_res_func *func = res->func;

	if (unlikely(res->id == -1)) {
		ret = func->create(res);
		if (unlikely(ret != 0))
			return ret;
	}

	if (func->bind &&
	    ((func->needs_backup && list_empty(&res->mob_head) &&
	      val_buf->bo != NULL) ||
	     (!func->needs_backup && val_buf->bo != NULL))) {
		ret = func->bind(res, val_buf);
		if (unlikely(ret != 0))
			goto out_bind_failed;
		if (func->needs_backup)
			list_add_tail(&res->mob_head, &res->backup->res_list);
	}

	/*
	 * Only do this on write operations, and move to
	 * vmw_resource_unreserve if it can be called after
	 * backup buffers have been unreserved. Otherwise
	 * sort out locking.
	 */
	res->res_dirty = true;

	return 0;

out_bind_failed:
	func->destroy(res);

	return ret;
}
/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res:               Pointer to the struct vmw_resource to unreserve.
 * @new_backup:        Pointer to new backup buffer if command submission
 *                     switched backup buffers.
 * @new_backup_offset: New backup offset if @new_backup is !NULL.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
			    struct vmw_dma_buffer *new_backup,
			    unsigned long new_backup_offset)
{
	struct vmw_private *dev_priv = res->dev_priv;

	if (!list_empty(&res->lru_head))
		return;

	if (new_backup && new_backup != res->backup) {

		if (res->backup) {
			lockdep_assert_held(&res->backup->base.resv->lock.base);
			list_del_init(&res->mob_head);
			vmw_dmabuf_unreference(&res->backup);
		}

		res->backup = vmw_dmabuf_reference(new_backup);
		lockdep_assert_held(&new_backup->base.resv->lock.base);
		list_add_tail(&res->mob_head, &new_backup->res_list);
	}
	if (new_backup)
		res->backup_offset = new_backup_offset;

	if (!res->func->may_evict || res->id == -1)
		return;

	write_lock(&dev_priv->resource_lock);
	list_add_tail(&res->lru_head,
		      &res->dev_priv->res_lru[res->func->res_type]);
	write_unlock(&dev_priv->resource_lock);
}
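
/*
 * Illustrative sketch (assumption, not in the original file): command
 * submission brackets resource use with reserve/validate/unreserve:
 *
 *	ret = vmw_resource_reserve(res, false);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	(reserve and validate the backup buffer, then)
 *	ret = vmw_resource_validate(res);
 *	...
 *	vmw_resource_unreserve(res, new_backup, new_backup_offset);
 *
 * where @new_backup is NULL unless the submission switched backup
 * buffers; the execbuf code is the real user of this sequence.
 */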
/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 *                             for a resource and in that case, allocate
 *                             one, reserve and validate it.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 * @val_buf:        On successful return contains data about the
 *                  reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct vmw_resource *res,
			  bool interruptible,
			  struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;
	bool backup_dirty = false;
	int ret;

	if (unlikely(res->backup == NULL)) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0))
			return ret;
	}

	INIT_LIST_HEAD(&val_list);
	val_buf->bo = ttm_bo_reference(&res->backup->base);
	list_add_tail(&val_buf->head, &val_list);
	ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible);
	if (unlikely(ret != 0))
		goto out_no_reserve;

	if (res->func->needs_backup && list_empty(&res->mob_head))
		return 0;

	backup_dirty = res->backup_dirty;
	ret = ttm_bo_validate(&res->backup->base,
			      res->func->backup_placement,
			      true, false);

	if (unlikely(ret != 0))
		goto out_no_validate;

	return 0;

out_no_validate:
	ttm_eu_backoff_reservation(NULL, &val_list);
out_no_reserve:
	ttm_bo_unref(&val_buf->bo);
	if (backup_dirty)
		vmw_dmabuf_unreference(&res->backup);

	return ret;
}
/**
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res:            The resource to reserve.
 *
 * This function takes the resource off the LRU list and makes sure
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 *
 */
int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	write_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	write_unlock(&dev_priv->resource_lock);

	if (res->func->needs_backup && res->backup == NULL &&
	    !no_backup) {
		ret = vmw_resource_buf_alloc(res, true);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}
/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 *                                    backup buffer
 *
 * @val_buf:        Backup buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;

	if (likely(val_buf->bo == NULL))
		return;

	INIT_LIST_HEAD(&val_list);
	list_add_tail(&val_buf->head, &val_list);
	ttm_eu_backoff_reservation(NULL, &val_list);
	ttm_bo_unref(&val_buf->bo);
}
/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 *                         to a backup buffer.
 *
 * @res:            The resource to evict.
 * @interruptible:  Whether to wait interruptible.
 */
int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
{
	struct ttm_validate_buffer val_buf;
	const struct vmw_res_func *func = res->func;
	int ret;

	BUG_ON(!func->may_evict);

	val_buf.bo = NULL;
	ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(func->unbind != NULL &&
		     (!func->needs_backup || !list_empty(&res->mob_head)))) {
		ret = func->unbind(res, res->res_dirty, &val_buf);
		if (unlikely(ret != 0))
			goto out_no_unbind;
		list_del_init(&res->mob_head);
	}
	ret = func->destroy(res);
	res->backup_dirty = true;
	res->res_dirty = false;
out_no_unbind:
	vmw_resource_backoff_reservation(&val_buf);

	return ret;
}
/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 *                         to the device.
 *
 * @res:            The resource to make visible to the device.
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 */
int vmw_resource_validate(struct vmw_resource *res)
{
	int ret;
	struct vmw_resource *evict_res;
	struct vmw_private *dev_priv = res->dev_priv;
	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
	struct ttm_validate_buffer val_buf;
	unsigned err_count = 0;

	if (likely(!res->func->may_evict))
		return 0;

	val_buf.bo = NULL;
	if (res->backup)
		val_buf.bo = &res->backup->base;
	do {
		ret = vmw_resource_do_validate(res, &val_buf);
		if (likely(ret != -EBUSY))
			break;

		write_lock(&dev_priv->resource_lock);
		if (list_empty(lru_list) || !res->func->may_evict) {
			DRM_ERROR("Out of device resources "
				  "for %s.\n", res->func->type_name);
			ret = -EBUSY;
			write_unlock(&dev_priv->resource_lock);
			break;
		}

		evict_res = vmw_resource_reference
			(list_first_entry(lru_list, struct vmw_resource,
					  lru_head));
		list_del_init(&evict_res->lru_head);

		write_unlock(&dev_priv->resource_lock);

		ret = vmw_resource_do_evict(evict_res, true);
		if (unlikely(ret != 0)) {
			write_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			write_unlock(&dev_priv->resource_lock);
			if (ret == -ERESTARTSYS ||
			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				goto out_no_validate;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

	if (unlikely(ret != 0))
		goto out_no_validate;
	else if (!res->func->needs_backup && res->backup) {
		list_del_init(&res->mob_head);
		vmw_dmabuf_unreference(&res->backup);
	}

	return 0;

out_no_validate:
	return ret;
}
/**
 * vmw_fence_single_bo - Utility function to fence a single TTM buffer
 *                       object without unreserving it.
 *
 * @bo:             Pointer to the struct ttm_buffer_object to fence.
 * @fence:          Pointer to the fence. If NULL, this function will
 *                  insert a fence into the command stream.
 *
 * Contrary to the ttm_eu version of this function, it takes only
 * a single buffer object instead of a list, and it also doesn't
 * unreserve the buffer object, which needs to be done separately.
 */
void vmw_fence_single_bo(struct ttm_buffer_object *bo,
			 struct vmw_fence_obj *fence)
{
	struct ttm_bo_device *bdev = bo->bdev;

	struct vmw_private *dev_priv =
		container_of(bdev, struct vmw_private, bdev);

	if (fence == NULL) {
		vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
		reservation_object_add_excl_fence(bo->resv, &fence->base);
		fence_put(&fence->base);
	} else
		reservation_object_add_excl_fence(bo->resv, &fence->base);
}
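
/*
 * Illustrative sketch (assumption, not in the original file): after
 * submitting commands that touch a reserved buffer, a caller typically
 * fences the buffer and only then unreserves it:
 *
 *	vmw_fence_single_bo(bo, fence);	(fence may be NULL)
 *	ttm_bo_unreserve(bo);
 */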
/**
 * vmw_resource_move_notify - TTM move_notify_callback
 *
 * @bo:             The TTM buffer object about to move.
 * @mem:            The struct ttm_mem_reg indicating to what memory
 *                  region the move is taking place.
 *
 * Evicts the Guest Backed hardware resource if the backup
 * buffer is being moved out of MOB memory.
 * Note that this function should not race with the resource
 * validation code as long as it accesses only members of struct
 * resource that remain static while bo::res is !NULL and
 * while we have @bo reserved. struct resource::backup is *not* a
 * static member. The resource validation code will take care
 * to set @bo::res to NULL, while having @bo reserved when the
 * buffer is no longer bound to the resource, so @bo:res can be
 * used to determine whether there is a need to unbind and whether
 * it is safe to unbind.
 */
void vmw_resource_move_notify(struct ttm_buffer_object *bo,
			      struct ttm_mem_reg *mem)
{
	struct vmw_dma_buffer *dma_buf;

	if (mem == NULL)
		return;

	if (bo->destroy != vmw_dmabuf_bo_free &&
	    bo->destroy != vmw_user_dmabuf_destroy)
		return;

	dma_buf = container_of(bo, struct vmw_dma_buffer, base);

	if (mem->mem_type != VMW_PL_MOB) {
		struct vmw_resource *res, *n;
		struct ttm_validate_buffer val_buf;

		val_buf.bo = bo;

		list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) {

			if (unlikely(res->func->unbind == NULL))
				continue;

			(void) res->func->unbind(res, true, &val_buf);
			res->backup_dirty = true;
			res->res_dirty = false;
			list_del_init(&res->mob_head);
		}

		(void) ttm_bo_wait(bo, false, false, false);
	}
}
/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res: The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
	return res->func->needs_backup;
}
/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv:       Pointer to a device private struct
 * @type:           The resource type to evict
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
				    enum vmw_res_type type)
{
	struct list_head *lru_list = &dev_priv->res_lru[type];
	struct vmw_resource *evict_res;
	unsigned err_count = 0;
	int ret;

	do {
		write_lock(&dev_priv->resource_lock);

		if (list_empty(lru_list))
			goto out_unlock;

		evict_res = vmw_resource_reference(
			list_first_entry(lru_list, struct vmw_resource,
					 lru_head));
		list_del_init(&evict_res->lru_head);
		write_unlock(&dev_priv->resource_lock);

		ret = vmw_resource_do_evict(evict_res, false);
		if (unlikely(ret != 0)) {
			write_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			write_unlock(&dev_priv->resource_lock);
			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				return;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

out_unlock:
	write_unlock(&dev_priv->resource_lock);
}
/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv:       Pointer to a device private struct
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
	enum vmw_res_type type;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	for (type = 0; type < vmw_res_max; ++type)
		vmw_resource_evict_type(dev_priv, type);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}