/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>

#define VMW_RES_HT_ORDER 12

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset of 4 byte entries into the command buffer where the
 * id that needs fixup is located.
 */
struct vmw_resource_relocation {
	struct list_head head;
	const struct vmw_resource *res;
	unsigned long offset;
};

/**
 * struct vmw_resource_val_node - Validation info for resources
 *
 * @head: List head for the software context's resource list.
 * @hash: Hash entry for quick resource to val_node lookup.
 * @res: Ref-counted pointer to the resource.
 * @switch_backup: Boolean whether to switch backup buffer on unreserve.
 * @new_backup: Refcounted pointer to the new backup buffer.
 * @staged_bindings: If @res is a context, tracks bindings set up during
 * the command batch. Otherwise NULL.
 * @new_backup_offset: New backup buffer offset if @new_backup is non-NULL.
 * @first_usage: Set to true the first time the resource is referenced in
 * the command stream.
 * @no_buffer_needed: Resources do not need to allocate buffer backup on
 * reservation. The command stream will provide one.
 */
struct vmw_resource_val_node {
	struct list_head head;
	struct drm_hash_item hash;
	struct vmw_resource *res;
	struct vmw_dma_buffer *new_backup;
	struct vmw_ctx_binding_state *staged_bindings;
	unsigned long new_backup_offset;
	bool first_usage;
	bool no_buffer_needed;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled iff guest-backed objects are available.
 */
struct vmw_cmd_entry {
	int (*func) (struct vmw_private *, struct vmw_sw_context *,
		     SVGA3dCmdHeader *);
	bool user_allow;
	bool gb_disable;
	bool gb_enable;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\
				       (_gb_disable), (_gb_enable)}
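
/*
 * Illustration (not part of the driver): the designated-initializer macro
 * above builds a sparse table indexed by (command id - SVGA_3D_CMD_BASE),
 * so an entry such as
 *
 *	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
 *		    true, false, false)
 *
 * expands to
 *
 *	[SVGA_3D_CMD_SURFACE_COPY - SVGA_3D_CMD_BASE] =
 *		{&vmw_cmd_surface_copy_check, true, false, false}
 *
 * which lets the verifier look a command up with a single array index.
 */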

/**
 * vmw_resource_list_unreserve - unreserve resources previously reserved for
 * command submission.
 *
 * @list: list of resources to unreserve.
 * @backoff: Whether command submission failed.
 */
static void vmw_resource_list_unreserve(struct list_head *list,
					bool backoff)
{
	struct vmw_resource_val_node *val;

	list_for_each_entry(val, list, head) {
		struct vmw_resource *res = val->res;
		struct vmw_dma_buffer *new_backup =
			backoff ? NULL : val->new_backup;

		/*
		 * Transfer staged context bindings to the
		 * persistent context binding tracker.
		 */
		if (unlikely(val->staged_bindings)) {
			if (!backoff) {
				vmw_context_binding_state_transfer
					(val->res, val->staged_bindings);
			}
			kfree(val->staged_bindings);
			val->staged_bindings = NULL;
		}
		vmw_resource_unreserve(res, new_backup,
				       val->new_backup_offset);
		vmw_dmabuf_unreference(&val->new_backup);
	}
}
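
/*
 * Illustration (not part of the driver): on a successful submission the
 * caller unreserves with backoff == false, so each resource picks up its
 * new backup buffer and staged context bindings become persistent; on
 * failure it unreserves with backoff == true and both are discarded:
 *
 *	vmw_resource_list_unreserve(&sw_context->resource_list, false);
 *	...
 *	vmw_resource_list_unreserve(&sw_context->resource_list, true);
 */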

/**
 * vmw_resource_val_add - Add a resource to the software context's
 * resource list if it's not already on it.
 *
 * @sw_context: Pointer to the software context.
 * @res: Pointer to the resource.
 * @p_node: On successful return points to a valid pointer to a
 * struct vmw_resource_val_node, if non-NULL on entry.
 */
static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *res,
				struct vmw_resource_val_node **p_node)
{
	struct vmw_resource_val_node *node;
	struct drm_hash_item *hash;
	int ret;

	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) res,
				    &hash) == 0)) {
		node = container_of(hash, struct vmw_resource_val_node, hash);
		node->first_usage = false;
		if (unlikely(p_node != NULL))
			*p_node = node;
		return 0;
	}

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (unlikely(node == NULL)) {
		DRM_ERROR("Failed to allocate a resource validation "
			  "entry.\n");
		return -ENOMEM;
	}

	node->hash.key = (unsigned long) res;
	ret = drm_ht_insert_item(&sw_context->res_ht, &node->hash);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to initialize a resource validation "
			  "entry.\n");
		kfree(node);
		return ret;
	}
	list_add_tail(&node->head, &sw_context->resource_list);
	node->res = vmw_resource_reference(res);
	node->first_usage = true;

	if (unlikely(p_node != NULL))
		*p_node = node;

	return 0;
}

/**
 * vmw_resource_context_res_add - Put resources previously bound to a context on
 * the validation list
 *
 * @dev_priv: Pointer to a device private structure
 * @sw_context: Pointer to a software context used for this command submission
 * @ctx: Pointer to the context resource
 *
 * This function puts all resources that were previously bound to @ctx on
 * the resource validation list. This is part of the context state reemission
 * sequence.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx)
{
	struct list_head *binding_list;
	struct vmw_ctx_binding *entry;
	int ret = 0;
	struct vmw_resource *res;

	mutex_lock(&dev_priv->binding_mutex);
	binding_list = vmw_context_binding_list(ctx);

	list_for_each_entry(entry, binding_list, ctx_list) {
		res = vmw_resource_reference_unless_doomed(entry->bi.res);
		if (unlikely(res == NULL))
			continue;

		ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL);
		vmw_resource_unreference(&res);
		if (unlikely(ret != 0))
			break;
	}

	mutex_unlock(&dev_priv->binding_mutex);
	return ret;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @list: Pointer to head of relocation list.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the
 * id that needs fixup is located. Granularity is 4 bytes.
 */
static int vmw_resource_relocation_add(struct list_head *list,
				       const struct vmw_resource *res,
				       unsigned long offset)
{
	struct vmw_resource_relocation *rel;

	rel = kmalloc(sizeof(*rel), GFP_KERNEL);
	if (unlikely(rel == NULL)) {
		DRM_ERROR("Failed to allocate a resource relocation.\n");
		return -ENOMEM;
	}

	rel->res = res;
	rel->offset = offset;
	list_add_tail(&rel->head, list);
	return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
	struct vmw_resource_relocation *rel, *n;

	list_for_each_entry_safe(rel, n, list, head) {
		list_del(&rel->head);
		kfree(rel);
	}
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need
 * not be the same buffer as the one being parsed when the relocation
 * list was built, but the contents must be the same modulo the
 * resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
					   struct list_head *list)
{
	struct vmw_resource_relocation *rel;

	list_for_each_entry(rel, list, head) {
		if (likely(rel->res != NULL))
			cb[rel->offset] = rel->res->id;
		else
			cb[rel->offset] = SVGA_3D_CMD_NOP;
	}
}
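
/*
 * Illustration (not part of the driver): a relocation is recorded with a
 * 4-byte index computed as (id_loc - sw_context->buf_start), and
 * vmw_resource_relocations_apply() later overwrites cb[index] with the
 * device id the resource received during validation. A relocation whose
 * @res is NULL instead overwrites the slot (the command's header id) with
 * SVGA_3D_CMD_NOP, eliding the command from the stream.
 */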

static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return capable(CAP_SYS_ADMIN) ? : -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}

/**
 * vmw_bo_to_validate_list - add a bo to a validate list
 *
 * @sw_context: The software context used for this command submission batch.
 * @bo: The buffer object to add.
 * @validate_as_mob: Validate this buffer as a MOB.
 * @p_val_node: If non-NULL, will be updated with the validate node number
 * on return.
 *
 * Returns -EINVAL if the limit of number of buffer objects per command
 * submission is reached.
 */
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
				   struct ttm_buffer_object *bo,
				   bool validate_as_mob,
				   uint32_t *p_val_node)
{
	uint32_t val_node;
	struct vmw_validate_buffer *vval_buf;
	struct ttm_validate_buffer *val_buf;
	struct drm_hash_item *hash;
	int ret;

	if (likely(drm_ht_find_item(&sw_context->res_ht, (unsigned long) bo,
				    &hash) == 0)) {
		vval_buf = container_of(hash, struct vmw_validate_buffer,
					hash);
		if (unlikely(vval_buf->validate_as_mob != validate_as_mob)) {
			DRM_ERROR("Inconsistent buffer usage.\n");
			return -EINVAL;
		}
		val_buf = &vval_buf->base;
		val_node = vval_buf - sw_context->val_bufs;
	} else {
		val_node = sw_context->cur_val_buf;
		if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
			DRM_ERROR("Max number of DMA buffers per submission "
				  "exceeded.\n");
			return -EINVAL;
		}
		vval_buf = &sw_context->val_bufs[val_node];
		vval_buf->hash.key = (unsigned long) bo;
		ret = drm_ht_insert_item(&sw_context->res_ht, &vval_buf->hash);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed to initialize a buffer validation "
				  "entry.\n");
			return ret;
		}
		++sw_context->cur_val_buf;
		val_buf = &vval_buf->base;
		val_buf->bo = ttm_bo_reference(bo);
		val_buf->reserved = false;
		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
		vval_buf->validate_as_mob = validate_as_mob;
	}

	sw_context->fence_flags |= DRM_VMW_FENCE_FLAG_EXEC;

	if (p_val_node)
		*p_val_node = val_node;

	return 0;
}
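
/*
 * Illustration (not part of the driver): buffers are keyed in
 * sw_context->res_ht by their kernel address, so adding the same bo twice
 * yields the same validate node:
 *
 *	uint32_t first, second;
 *
 *	vmw_bo_to_validate_list(sw_context, bo, false, &first);
 *	vmw_bo_to_validate_list(sw_context, bo, false, &second);
 *
 * Afterwards first == second, and the validate list holds a single entry
 * for bo.
 */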

/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since vmware's command submission currently is protected by
 * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
 * since only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		struct vmw_resource *res = val->res;

		ret = vmw_resource_reserve(res, val->no_buffer_needed);
		if (unlikely(ret != 0))
			return ret;

		if (res->backup) {
			struct ttm_buffer_object *bo = &res->backup->base;

			ret = vmw_bo_to_validate_list
				(sw_context, bo,
				 vmw_resource_needs_backup(res), NULL);

			if (unlikely(ret != 0))
				return ret;
		}
	}
	return 0;
}

/**
 * vmw_resources_validate - Validate all resources on the sw_context's
 * resource list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Before this function is called, all resource backup buffers must have
 * been validated.
 */
static int vmw_resources_validate(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		struct vmw_resource *res = val->res;

		ret = vmw_resource_validate(res);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to validate resource.\n");
			return ret;
		}
	}
	return 0;
}

/**
 * vmw_cmd_res_reloc_add - Add a resource to a software context's
 * relocation- and validation lists.
 *
 * @dev_priv: Pointer to a struct vmw_private identifying the device.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @id_loc: Pointer to where the id that needs translation is located.
 * @res: Valid pointer to a struct vmw_resource.
 * @p_val: If non-NULL, a pointer to the struct vmw_resource_validate_node
 * used for this resource is returned here.
 */
static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 enum vmw_res_type res_type,
				 uint32_t *id_loc,
				 struct vmw_resource *res,
				 struct vmw_resource_val_node **p_val)
{
	int ret;
	struct vmw_resource_val_node *node;

	*p_val = NULL;
	ret = vmw_resource_relocation_add(&sw_context->res_relocations,
					  res,
					  id_loc - sw_context->buf_start);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_resource_val_add(sw_context, res, &node);
	if (unlikely(ret != 0))
		goto out_err;

	if (res_type == vmw_res_context && dev_priv->has_mob &&
	    node->first_usage) {

		/*
		 * Put contexts first on the list to be able to exit
		 * list traversal for contexts early.
		 */
		list_del(&node->head);
		list_add(&node->head, &sw_context->resource_list);

		ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
		if (unlikely(ret != 0))
			goto out_err;
		node->staged_bindings =
			kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
		if (node->staged_bindings == NULL) {
			DRM_ERROR("Failed to allocate context binding "
				  "information.\n");
			ret = -ENOMEM;
			goto out_err;
		}
		INIT_LIST_HEAD(&node->staged_bindings->list);
	}

	if (p_val)
		*p_val = node;

out_err:
	return ret;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it
 * on the resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being
 * parsed from where the user-space resource id handle is located.
 * @p_val: Pointer to pointer to resource validation node. Populated
 * on exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
		  struct vmw_sw_context *sw_context,
		  enum vmw_res_type res_type,
		  const struct vmw_user_resource_conv *converter,
		  uint32_t *id_loc,
		  struct vmw_resource_val_node **p_val)
{
	struct vmw_res_cache_entry *rcache =
		&sw_context->res_cache[res_type];
	struct vmw_resource *res;
	struct vmw_resource_val_node *node;
	int ret;

	if (*id_loc == SVGA3D_INVALID_ID) {
		if (p_val)
			*p_val = NULL;
		if (res_type == vmw_res_context) {
			DRM_ERROR("Illegal context invalid id.\n");
			return -EINVAL;
		}
		return 0;
	}

	/*
	 * Fastpath in case of repeated commands referencing the same
	 * resource
	 */

	if (likely(rcache->valid && *id_loc == rcache->handle)) {
		const struct vmw_resource *res = rcache->res;

		rcache->node->first_usage = false;
		if (p_val)
			*p_val = rcache->node;

		return vmw_resource_relocation_add
			(&sw_context->res_relocations, res,
			 id_loc - sw_context->buf_start);
	}

	ret = vmw_user_resource_lookup_handle(dev_priv,
					      sw_context->fp->tfile,
					      *id_loc,
					      converter,
					      &res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use resource 0x%08x.\n",
			  (unsigned) *id_loc);
		return ret;
	}

	rcache->valid = true;
	rcache->res = res;
	rcache->handle = *id_loc;

	ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, res_type, id_loc,
				    res, &node);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	rcache->node = node;
	if (p_val)
		*p_val = node;
	vmw_resource_unreference(&res);
	return 0;

out_no_reloc:
	BUG_ON(sw_context->error_resource != NULL);
	sw_context->error_resource = res;

	return ret;
}
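
/*
 * Illustration (not part of the driver): a batch that sets a render target
 * and then draws typically references the same context id in consecutive
 * commands. The first vmw_cmd_res_check() call pays for a handle lookup
 * and a validation-list insertion; the second hits the one-entry
 * sw_context->res_cache[vmw_res_context] fastpath above and only appends
 * a relocation.
 */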

/**
 * vmw_rebind_contexts - Rebind all resources previously bound to
 * referenced contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
	struct vmw_resource_val_node *val;
	int ret;

	list_for_each_entry(val, &sw_context->resource_list, head) {
		if (unlikely(!val->staged_bindings))
			break;

		ret = vmw_context_rebind_all(val->res);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				DRM_ERROR("Failed to rebind context.\n");
			return ret;
		}
	}

	return 0;
}

/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_cid_cmd {
		SVGA3dCmdHeader header;
		__le32 cid;
	} *cmd;

	cmd = container_of(header, struct vmw_cid_cmd, header);
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->cid, NULL);
}

static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetRenderTarget body;
	} *cmd;
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource_val_node *res_node;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.target.sid, &res_node);
	if (unlikely(ret != 0))
		return ret;

	if (dev_priv->has_mob) {
		struct vmw_ctx_bindinfo bi;

		bi.ctx = ctx_node->res;
		bi.res = res_node ? res_node->res : NULL;
		bi.bt = vmw_ctx_binding_rt;
		bi.i1.rt_type = cmd->body.type;
		return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
	}

	return 0;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceCopy body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceStretchBlt body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBlitSurfaceToScreen body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.srcImage.sid, NULL);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter, &cmd->body.sid,
				 NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding
 * query results, and if another buffer currently is pinned for query
 * results. If so, the function prepares the state of @sw_context for
 * switching pinned buffers after successful submission of the current
 * command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
				       struct ttm_buffer_object *new_query_bo,
				       struct vmw_sw_context *sw_context)
{
	struct vmw_res_cache_entry *ctx_entry =
		&sw_context->res_cache[vmw_res_context];
	int ret;

	BUG_ON(!ctx_entry->valid);
	sw_context->last_query_ctx = ctx_entry->res;

	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

		if (unlikely(new_query_bo->num_pages > 4)) {
			DRM_ERROR("Query buffer too large.\n");
			return -EINVAL;
		}

		if (unlikely(sw_context->cur_query_bo != NULL)) {
			sw_context->needs_post_query_barrier = true;
			ret = vmw_bo_to_validate_list(sw_context,
						      sw_context->cur_query_bo,
						      dev_priv->has_mob, NULL);
			if (unlikely(ret != 0))
				return ret;
		}
		sw_context->cur_query_bo = new_query_bo;

		ret = vmw_bo_to_validate_list(sw_context,
					      dev_priv->dummy_query_bo,
					      dev_priv->has_mob, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}

/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all
 * preceding queries have finished, and the old query buffer can be unpinned.
 * However, since both the new query buffer and the old one are fenced with
 * that fence, we can do an asynchronous unpin now, and be sure that the
 * old query buffer won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context)
{
	/*
	 * The validate list should still hold references to all
	 * contexts here.
	 */

	if (sw_context->needs_post_query_barrier) {
		struct vmw_res_cache_entry *ctx_entry =
			&sw_context->res_cache[vmw_res_context];
		struct vmw_resource *ctx;
		int ret;

		BUG_ON(!ctx_entry->valid);
		ctx = ctx_entry->res;

		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);

		if (unlikely(ret != 0))
			DRM_ERROR("Out of fifo space for dummy query.\n");
	}

	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
		if (dev_priv->pinned_bo) {
			vmw_bo_pin(dev_priv->pinned_bo, false);
			ttm_bo_unref(&dev_priv->pinned_bo);
		}

		if (!sw_context->needs_post_query_barrier) {
			vmw_bo_pin(sw_context->cur_query_bo, true);

			/*
			 * We pin also the dummy_query_bo buffer so that we
			 * don't need to validate it when emitting
			 * dummy queries in context destroy paths.
			 */

			vmw_bo_pin(dev_priv->dummy_query_bo, true);
			dev_priv->dummy_query_bo_pinned = true;

			BUG_ON(sw_context->last_query_ctx == NULL);
			dev_priv->query_cid = sw_context->last_query_ctx->id;
			dev_priv->query_cid_valid = true;
			dev_priv->pinned_bo =
				ttm_bo_reference(sw_context->cur_query_bo);
		}
	}
}
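
/*
 * Illustration (not part of the driver): the expected calling sequence is
 * roughly
 *
 *	vmw_query_bo_switch_prepare(dev_priv, new_query_bo, sw_context);
 *	... submit and validate the command batch ...
 *	vmw_query_bo_switch_commit(dev_priv, sw_context);
 *	... emit the fence covering the batch ...
 *
 * since, as documented above, both the new and the old query buffer must
 * be fenced with a sequence emitted after the commit.
 */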

/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer
 * handle to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations(). This function builds a relocation
 * list and a list of buffers to validate. The former needs to be freed using
 * either vmw_apply_relocations() or vmw_free_relocations(). The latter
 * needs to be freed using vmw_clear_validations.
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct ttm_buffer_object *bo;
	uint32_t handle = *id;
	struct vmw_relocation *reloc;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use MOB buffer.\n");
		return -EINVAL;
	}
	bo = &vmw_bo->base;

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number relocations per submission"
			  " exceeded\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->mob_loc = id;
	reloc->location = NULL;

	ret = vmw_bo_to_validate_list(sw_context, bo, true, &reloc->index);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer
 * handle to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry
 * a reference-counted pointer to the DMA buffer identified by the
 * user-space handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct ttm_buffer_object *bo;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use GMR region.\n");
		return -EINVAL;
	}
	bo = &vmw_bo->base;

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number relocations per submission"
			  " exceeded\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->location = ptr;

	ret = vmw_bo_to_validate_list(sw_context, bo, false, &reloc->index);
	if (unlikely(ret != 0))
		goto out_no_reloc;

	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}

/**
 * vmw_cmd_begin_gb_query - validate a SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_begin_gb_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginGBQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_gb_query_cmd,
			   header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}

/**
 * vmw_cmd_begin_query - validate a SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
			       struct vmw_sw_context *sw_context,
			       SVGA3dCmdHeader *header)
{
	struct vmw_begin_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBeginQuery q;
	} *cmd;

	cmd = container_of(header, struct vmw_begin_query_cmd,
			   header);

	if (unlikely(dev_priv->has_mob)) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdBeginGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 user_context_converter, &cmd->q.cid,
				 NULL);
}
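
/*
 * Illustration (not part of the driver): the in-place rewrite above is
 * only safe because the legacy and guest-backed command structs have the
 * same size, which the BUG_ON() asserts. Conceptually:
 *
 *	legacy:       | header: BEGIN_QUERY    | cid | type |
 *	rewritten as: | header: BEGIN_GB_QUERY | cid | type |
 *
 * after which the guest-backed validator is reused for the patched
 * command. The END_QUERY and WAIT_FOR_QUERY validators below use the
 * same promotion trick.
 */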

/**
 * vmw_cmd_end_gb_query - validate a SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndGBQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
				    &cmd->q.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);

	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

/**
 * vmw_cmd_end_query - validate a SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	if (dev_priv->has_mob) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdEndGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;
		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
		gb_cmd.q.offset = cmd->q.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, &vmw_bo->base, sw_context);

	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}

/**
 * vmw_cmd_wait_gb_query - validate a SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForGBQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context,
				    &cmd->q.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}

/**
 * vmw_cmd_wait_query - validate a SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	if (dev_priv->has_mob) {
		struct {
			SVGA3dCmdHeader header;
			SVGA3dCmdWaitForGBQuery q;
		} gb_cmd;

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.q.cid = cmd->q.cid;
		gb_cmd.q.type = cmd->q.type;
		gb_cmd.q.mobid = cmd->q.guestResult.gmrId;
		gb_cmd.q.offset = cmd->q.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}

static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct vmw_surface *srf = NULL;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int ret;
	SVGA3dCmdSurfaceDMASuffix *suffix;
	uint32_t bo_size;

	cmd = container_of(header, struct vmw_dma_cmd, header);
	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
					       header->size - sizeof(*suffix));

	/* Make sure device and verifier stays in sync. */
	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
		DRM_ERROR("Invalid DMA suffix size.\n");
		return -EINVAL;
	}

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->dma.guest.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	/* Make sure DMA doesn't cross BO boundaries. */
	bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
	if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
		DRM_ERROR("Invalid DMA offset.\n");
		return -EINVAL;
	}

	bo_size -= cmd->dma.guest.ptr.offset;
	if (unlikely(suffix->maximumOffset > bo_size))
		suffix->maximumOffset = bo_size;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				user_surface_converter, &cmd->dma.host.sid,
				NULL);
	if (unlikely(ret != 0)) {
		if (unlikely(ret != -ERESTARTSYS))
			DRM_ERROR("could not find surface for DMA.\n");
		goto out_no_surface;
	}

	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
			     header);

out_no_surface:
	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}
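
/*
 * Illustration (not part of the driver): the bounds check above clamps
 * rather than rejects. Assuming 4 KiB pages, a 4-page buffer gives
 * bo_size == 16384; with guest.ptr.offset == 4096, any
 * suffix->maximumOffset larger than 12288 is silently reduced to 12288,
 * so the device can never be asked to DMA past the end of the buffer
 * object.
 */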

static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	struct vmw_draw_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDrawPrimitives body;
	} *cmd;
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_draw_cmd, header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		DRM_ERROR("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&decl->array.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		DRM_ERROR("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&range->indexArray.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_tex_state_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetTextureState state;
	} *cmd;

	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
	  ((unsigned long) header + header->size + sizeof(header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
	  ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
	struct vmw_resource_val_node *ctx_node;
	struct vmw_resource_val_node *res_node;
	int ret;

	cmd = container_of(header, struct vmw_tex_state_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->state.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					user_surface_converter,
					&cur_state->value, &res_node);
		if (unlikely(ret != 0))
			return ret;

		if (dev_priv->has_mob) {
			struct vmw_ctx_bindinfo bi;

			bi.ctx = ctx_node->res;
			bi.res = res_node ? res_node->res : NULL;
			bi.bt = vmw_ctx_binding_tex;
			bi.i1.texture_stage = cur_state->stage;
			vmw_context_binding_add(ctx_node->staged_bindings,
						&bi);
		}
	}

	return 0;
}

static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      void *buf)
{
	struct vmw_dma_buffer *vmw_bo;
	int ret;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd = buf;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);

	return ret;
}

/**
 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res_type: The resource type.
 * @converter: Information about user-space binding for this resource type.
 * @res_id: Pointer to the user-space resource handle in the command stream.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers
 * in the resource metadata just prior to unreserving.
 */
static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 enum vmw_res_type res_type,
				 const struct vmw_user_resource_conv
				 *converter,
				 uint32_t *res_id,
				 uint32_t *buf_id,
				 unsigned long backup_offset)
{
	int ret;
	struct vmw_dma_buffer *dma_buf;
	struct vmw_resource_val_node *val_node;

	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
				converter, res_id, &val_node);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf);
	if (unlikely(ret != 0))
		return ret;

	if (val_node->first_usage)
		val_node->no_buffer_needed = true;

	vmw_dmabuf_unreference(&val_node->new_backup);
	val_node->new_backup = dma_buf;
	val_node->new_backup_offset = backup_offset;

	return 0;
}
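
/*
 * Illustration (not part of the driver): nothing is bound here; the new
 * backup buffer is only recorded on the validation node. The actual
 * switch happens when vmw_resource_list_unreserve() runs after a
 * successful submission and passes val_node->new_backup and
 * val_node->new_backup_offset on to vmw_resource_unreserve().
 */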

/**
 * vmw_cmd_bind_gb_surface - Validate an SVGA_3D_CMD_BIND_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct vmw_bind_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_bind_gb_surface_cmd, header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
				     user_surface_converter,
				     &cmd->body.sid, &cmd->body.mobid,
				     0);
}

/**
 * vmw_cmd_update_gb_image - Validate an SVGA_3D_CMD_UPDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_update_gb_surface - Validate an SVGA_3D_CMD_UPDATE_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdUpdateGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_image - Validate an SVGA_3D_CMD_READBACK_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_surface - Validate an SVGA_3D_CMD_READBACK_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_image - Validate an SVGA_3D_CMD_INVALIDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBImage body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_surface - Validate an
 * SVGA_3D_CMD_INVALIDATE_GB_SURFACE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_gb_surface_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdInvalidateGBSurface body;
	} *cmd;

	cmd = container_of(header, struct vmw_gb_surface_cmd, header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_shader_define_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineShader body;
	} *cmd;
	int ret;
	size_t size;
	struct vmw_resource_val_node *val;

	cmd = container_of(header, struct vmw_shader_define_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&val);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	size = cmd->header.size - sizeof(cmd->body);
	ret = vmw_compat_shader_add(dev_priv,
				    vmw_context_res_man(val->res),
				    cmd->body.shid, cmd + 1,
				    cmd->body.type, size,
				    &sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_resource_relocation_add(&sw_context->res_relocations,
					   NULL, &cmd->header.id -
					   sw_context->buf_start);
}
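
/*
 * Illustration (not part of the driver): note the NULL resource passed to
 * vmw_resource_relocation_add() above. Per vmw_resource_relocations_apply(),
 * a NULL relocation overwrites the command's header id with
 * SVGA_3D_CMD_NOP, so on guest-backed hardware the legacy SHADER_DEFINE
 * is absorbed into the compat shader manager and then elided from the
 * stream sent to the device.
 */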

/**
 * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_shader_destroy_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyShader body;
	} *cmd;
	int ret;
	struct vmw_resource_val_node *val;

	cmd = container_of(header, struct vmw_shader_destroy_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&val);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	ret = vmw_compat_shader_remove(vmw_context_res_man(val->res),
				       cmd->body.shid,
				       cmd->body.type,
				       &sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_resource_relocation_add(&sw_context->res_relocations,
					   NULL, &cmd->header.id -
					   sw_context->buf_start);
}

/**
 * vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_set_shader_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetShader body;
	} *cmd;
	struct vmw_resource_val_node *ctx_node, *res_node = NULL;
	struct vmw_ctx_bindinfo bi;
	struct vmw_resource *res = NULL;
	int ret;

	cmd = container_of(header, struct vmw_set_shader_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				&ctx_node);
	if (unlikely(ret != 0))
		return ret;

	if (!dev_priv->has_mob)
		return 0;

	if (cmd->body.shid != SVGA3D_INVALID_ID) {
		res = vmw_compat_shader_lookup
			(vmw_context_res_man(ctx_node->res),
			 cmd->body.shid,
			 cmd->body.type);

		if (!IS_ERR(res)) {
			ret = vmw_cmd_res_reloc_add(dev_priv, sw_context,
						    vmw_res_shader,
						    &cmd->body.shid, res,
						    &res_node);
			vmw_resource_unreference(&res);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	if (!res_node) {
		ret = vmw_cmd_res_check(dev_priv, sw_context,
					vmw_res_shader,
					user_shader_converter,
					&cmd->body.shid, &res_node);
		if (unlikely(ret != 0))
			return ret;
	}

	bi.ctx = ctx_node->res;
	bi.res = res_node ? res_node->res : NULL;
	bi.bt = vmw_ctx_binding_shader;
	bi.i1.shader_type = cmd->body.type;
	return vmw_context_binding_add(ctx_node->staged_bindings, &bi);
}

/**
 * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	struct vmw_set_shader_const_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetShaderConst body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_set_shader_const_cmd,
			   header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				user_context_converter, &cmd->body.cid,
				NULL);
	if (unlikely(ret != 0))
		return ret;

	if (dev_priv->has_mob)
		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;

	return 0;
}

/**
 * vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_bind_gb_shader_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBShader body;
	} *cmd;

	cmd = container_of(header, struct vmw_bind_gb_shader_cmd,
			   header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
				     user_shader_converter,
				     &cmd->body.shid, &cmd->body.mobid,
				     cmd->body.offsetInBytes);
}

static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				void *buf, uint32_t *size)
{
	uint32_t size_remaining = *size;
	uint32_t cmd_id;

	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	default:
		DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
		return -EINVAL;
	}

	if (*size > size_remaining) {
		DRM_ERROR("Invalid SVGA command (size mismatch):"
			  " %u.\n", cmd_id);
		return -EINVAL;
	}

	if (unlikely(!sw_context->kernel)) {
		DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
		return -EPERM;
	}

	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);

	return 0;
}
1889 static const struct vmw_cmd_entry vmw_cmd_entries
[SVGA_3D_CMD_MAX
] = {
1890 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE
, &vmw_cmd_invalid
,
1891 false, false, false),
1892 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY
, &vmw_cmd_invalid
,
1893 false, false, false),
1894 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY
, &vmw_cmd_surface_copy_check
,
1895 true, false, false),
1896 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT
, &vmw_cmd_stretch_blt_check
,
1897 true, false, false),
1898 VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA
, &vmw_cmd_dma
,
1899 true, false, false),
1900 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE
, &vmw_cmd_invalid
,
1901 false, false, false),
1902 VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY
, &vmw_cmd_invalid
,
1903 false, false, false),
1904 VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM
, &vmw_cmd_cid_check
,
1905 true, false, false),
1906 VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE
, &vmw_cmd_cid_check
,
1907 true, false, false),
1908 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE
, &vmw_cmd_cid_check
,
1909 true, false, false),
1910 VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET
,
1911 &vmw_cmd_set_render_target_check
, true, false, false),
1912 VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE
, &vmw_cmd_tex_state
,
1913 true, false, false),
1914 VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL
, &vmw_cmd_cid_check
,
1915 true, false, false),
1916 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA
, &vmw_cmd_cid_check
,
1917 true, false, false),
1918 VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED
, &vmw_cmd_cid_check
,
1919 true, false, false),
1920 VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT
, &vmw_cmd_cid_check
,
1921 true, false, false),
1922 VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE
, &vmw_cmd_cid_check
,
1923 true, false, false),
1924 VMW_CMD_DEF(SVGA_3D_CMD_CLEAR
, &vmw_cmd_cid_check
,
1925 true, false, false),
1926 VMW_CMD_DEF(SVGA_3D_CMD_PRESENT
, &vmw_cmd_present_check
,
1927 false, false, false),
1928 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE
, &vmw_cmd_shader_define
,
1929 true, false, false),
1930 VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY
, &vmw_cmd_shader_destroy
,
1931 true, false, false),
1932 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER
, &vmw_cmd_set_shader
,
1933 true, false, false),
1934 VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST
, &vmw_cmd_set_shader_const
,
1935 true, false, false),
1936 VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES
, &vmw_cmd_draw
,
1937 true, false, false),
1938 VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT
, &vmw_cmd_cid_check
,
1939 true, false, false),
1940 VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY
, &vmw_cmd_begin_query
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
		    &vmw_cmd_blt_surf_screen_check, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_UNITY_SURFACE_COOKIE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_OPEN_CONTEXT_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_BITBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_TRANSBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_STRETCHBLT, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_COLORFILL, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_ALPHABLEND, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_LOGICOPS_CLEARTYPEBLEND, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
		    &vmw_cmd_update_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
		    &vmw_cmd_readback_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
		    &vmw_cmd_readback_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
		    &vmw_cmd_invalidate_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
		    &vmw_cmd_invalidate_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE,
		    &vmw_cmd_cid_check, true, false, true)
};
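/*
 * Added note: VMW_CMD_DEF expands to a designated array initializer, so
 * the verifier can index vmw_cmd_entries[] directly by the command id
 * offset. A minimal illustrative lookup (a sketch, not a quote from the
 * original file) looks like:
 *
 *	const struct vmw_cmd_entry *entry =
 *		&vmw_cmd_entries[cmd_id - SVGA_3D_CMD_BASE];
 *
 *	if (entry->func)
 *		ret = entry->func(dev_priv, sw_context, header);
 *
 * vmw_cmd_check() below performs this lookup, plus the privilege and
 * guest-backed-object availability checks encoded in the three flags.
 */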
static int vmw_cmd_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context,
			 void *buf, uint32_t *size)
{
	uint32_t cmd_id;
	uint32_t size_remaining = *size;
	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
	int ret;
	const struct vmw_cmd_entry *entry;
	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;

	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
	/* Handle any non-3D commands */
	if (unlikely(cmd_id < SVGA_CMD_MAX))
		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);

	cmd_id = le32_to_cpu(header->id);
	*size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);

	cmd_id -= SVGA_3D_CMD_BASE;
	if (unlikely(*size > size_remaining))
		goto out_invalid;

	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
		goto out_invalid;

	entry = &vmw_cmd_entries[cmd_id];
	if (unlikely(!entry->func))
		goto out_invalid;

	if (unlikely(!entry->user_allow && !sw_context->kernel))
		goto out_privileged;

	if (unlikely(entry->gb_disable && gb))
		goto out_old;

	if (unlikely(entry->gb_enable && !gb))
		goto out_new;

	ret = entry->func(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		goto out_invalid;

	return 0;

out_invalid:
	DRM_ERROR("Invalid SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;

out_privileged:
	DRM_ERROR("Privileged SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EPERM;

out_old:
	DRM_ERROR("Deprecated (disallowed) SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;

out_new:
	DRM_ERROR("SVGA3D command: %d not supported by virtual hardware.\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
}
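/*
 * Added note on the stream format (follows the SVGA3dCmdHeader layout
 * used above): each 3D command is a header (32-bit id, 32-bit body size
 * in bytes) immediately followed by its body, so the iterator in
 * vmw_cmd_check_all() below advances by
 *
 *	le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader)
 *
 * bytes per command until the submitted byte count is exhausted.
 */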
static int vmw_cmd_check_all(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     void *buf,
			     uint32_t size)
{
	int32_t cur_size = size;
	int ret;

	sw_context->buf_start = buf;

	while (cur_size > 0) {
		size = cur_size;
		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
		if (unlikely(ret != 0))
			return ret;
		buf = (void *)((unsigned long) buf + size);
		cur_size -= size;
	}

	if (unlikely(cur_size != 0)) {
		DRM_ERROR("Command verifier out of sync.\n");
		return -EINVAL;
	}

	return 0;
}
static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
	sw_context->cur_reloc = 0;
}
static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
	uint32_t i;
	struct vmw_relocation *reloc;
	struct ttm_validate_buffer *validate;
	struct ttm_buffer_object *bo;

	for (i = 0; i < sw_context->cur_reloc; ++i) {
		reloc = &sw_context->relocs[i];
		validate = &sw_context->val_bufs[reloc->index].base;
		bo = validate->bo;

		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			reloc->location->offset += bo->offset;
			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
			break;
		case VMW_PL_GMR:
			reloc->location->gmrId = bo->mem.start;
			break;
		case VMW_PL_MOB:
			*reloc->mob_loc = bo->mem.start;
			break;
		default:
			BUG();
		}
	}
	vmw_free_relocations(sw_context);
}
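/*
 * Added note: a relocation records where in the command buffer a
 * buffer-object address must be patched once validation has decided the
 * object's final placement. VRAM-placed objects are addressed as an
 * offset into the framebuffer GMR, GMR-placed objects by their GMR id,
 * and MOB-placed objects by their MOB start page.
 */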
/**
 * vmw_resource_list_unreference - Free up a resource list and unreference
 * all resources referenced by it.
 *
 * @list: The resource list.
 */
static void vmw_resource_list_unreference(struct list_head *list)
{
	struct vmw_resource_val_node *val, *val_next;

	/*
	 * Drop references to resources held during command submission.
	 */

	list_for_each_entry_safe(val, val_next, list, head) {
		list_del_init(&val->head);
		vmw_resource_unreference(&val->res);
		if (unlikely(val->staged_bindings))
			kfree(val->staged_bindings);
		kfree(val);
	}
}
static void vmw_clear_validations(struct vmw_sw_context *sw_context)
{
	struct vmw_validate_buffer *entry, *next;
	struct vmw_resource_val_node *val;

	/*
	 * Drop references to DMA buffers held during command submission.
	 */
	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
				 base.head) {
		list_del(&entry->base.head);
		ttm_bo_unref(&entry->base.bo);
		(void) drm_ht_remove_item(&sw_context->res_ht, &entry->hash);
		sw_context->cur_val_buf--;
	}
	BUG_ON(sw_context->cur_val_buf != 0);

	list_for_each_entry(val, &sw_context->resource_list, head)
		(void) drm_ht_remove_item(&sw_context->res_ht, &val->hash);
}
static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
				      struct ttm_buffer_object *bo,
				      bool validate_as_mob)
{
	int ret;

	/*
	 * Don't validate pinned buffers.
	 */

	if (bo == dev_priv->pinned_bo ||
	    (bo == dev_priv->dummy_query_bo &&
	     dev_priv->dummy_query_bo_pinned))
		return 0;

	if (validate_as_mob)
		return ttm_bo_validate(bo, &vmw_mob_placement, true, false);

	/*
	 * Put BO in VRAM if there is space, otherwise as a GMR.
	 * If there is no space in VRAM and GMR ids are all used up,
	 * start evicting GMRs to make room. If the DMA buffer can't be
	 * used as a GMR, this will return -ENOMEM.
	 */
	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false);
	if (likely(ret == 0 || ret == -ERESTARTSYS))
		return ret;

	/*
	 * If that failed, try VRAM again, this time evicting
	 * previous contents.
	 */

	DRM_INFO("Falling through to VRAM.\n");
	ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false);
	return ret;
}
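/*
 * Added note on the placement fallback chain above: the order tried is
 * (1) VRAM-or-GMR without evicting VRAM contents, then (2) VRAM with
 * eviction. A buffer that must back a MOB skips both steps and is
 * validated directly against the MOB placement.
 */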
static int vmw_validate_buffers(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context)
{
	struct vmw_validate_buffer *entry;
	int ret;

	list_for_each_entry(entry, &sw_context->validate_nodes, base.head) {
		ret = vmw_validate_single_buffer(dev_priv, entry->base.bo,
						 entry->validate_as_mob);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}
static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
				 uint32_t size)
{
	if (likely(sw_context->cmd_bounce_size >= size))
		return 0;

	if (sw_context->cmd_bounce_size == 0)
		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;

	while (sw_context->cmd_bounce_size < size) {
		sw_context->cmd_bounce_size =
			PAGE_ALIGN(sw_context->cmd_bounce_size +
				   (sw_context->cmd_bounce_size >> 1));
	}

	if (sw_context->cmd_bounce != NULL)
		vfree(sw_context->cmd_bounce);

	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);

	if (sw_context->cmd_bounce == NULL) {
		DRM_ERROR("Failed to allocate command bounce buffer.\n");
		sw_context->cmd_bounce_size = 0;
		return -ENOMEM;
	}

	return 0;
}
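/*
 * Growth example (added; assumes VMWGFX_CMD_BOUNCE_INIT_SIZE is 32 KiB
 * as defined in vmwgfx_drv.h): a 100 KiB submission grows the bounce
 * buffer 32K -> 48K -> 72K -> 108K, i.e. roughly 1.5x per step,
 * page-aligned, so repeated large submissions converge quickly without
 * reallocating on every call.
 */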
/**
 * vmw_execbuf_fence_commands - create and submit a command stream fence
 *
 * Creates a fence object and submits a command stream marker.
 * If this fails for some reason, we sync the fifo and return NULL.
 * It is then safe to fence buffers with a NULL pointer.
 *
 * If @p_handle is not NULL @file_priv must also not be NULL. Creates
 * a userspace handle if @p_handle is not NULL, otherwise not.
 */
int vmw_execbuf_fence_commands(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       struct vmw_fence_obj **p_fence,
			       uint32_t *p_handle)
{
	uint32_t sequence;
	int ret;
	bool synced = false;

	/* p_handle implies file_priv. */
	BUG_ON(p_handle != NULL && file_priv == NULL);

	ret = vmw_fifo_send_fence(dev_priv, &sequence);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Fence submission error. Syncing.\n");
		synced = true;
	}

	if (p_handle != NULL)
		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
					    sequence,
					    DRM_VMW_FENCE_FLAG_EXEC,
					    p_fence, p_handle);
	else
		ret = vmw_fence_create(dev_priv->fman, sequence,
				       DRM_VMW_FENCE_FLAG_EXEC,
				       p_fence);

	if (unlikely(ret != 0 && !synced)) {
		(void) vmw_fallback_wait(dev_priv, false, false,
					 sequence, false,
					 VMW_FENCE_WAIT_TIMEOUT);
		*p_fence = NULL;
	}

	return 0;
}
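/*
 * Usage sketch (added; illustrative only): a kernel-internal caller that
 * needs a fence but no user-space handle passes NULL for both file_priv
 * and p_handle:
 *
 *	struct vmw_fence_obj *fence = NULL;
 *
 *	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
 *
 * On failure the fifo has already been synced, so fencing buffers with
 * the resulting NULL fence pointer is safe, as noted above.
 */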
/**
 * vmw_execbuf_copy_fence_user - copy fence object information to
 * user-space.
 *
 * @dev_priv: Pointer to a vmw_private struct.
 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
 * @ret: Return value from fence object creation.
 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to
 * which the information should be copied.
 * @fence: Pointer to the fence object.
 * @fence_handle: User-space fence handle.
 *
 * This function copies fence information to user-space. If copying fails,
 * the user-space struct drm_vmw_fence_rep::error member is hopefully
 * left untouched, and if it's preloaded with an -EFAULT by user-space,
 * the error will hopefully be detected.
 * Also if copying fails, user-space will be unable to signal the fence
 * object so we wait for it immediately, and then unreference the
 * user-space reference.
 */
void
vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
			    struct vmw_fpriv *vmw_fp,
			    int ret,
			    struct drm_vmw_fence_rep __user *user_fence_rep,
			    struct vmw_fence_obj *fence,
			    uint32_t fence_handle)
{
	struct drm_vmw_fence_rep fence_rep;

	if (user_fence_rep == NULL)
		return;

	memset(&fence_rep, 0, sizeof(fence_rep));

	fence_rep.error = ret;
	if (ret == 0) {
		BUG_ON(fence == NULL);

		fence_rep.handle = fence_handle;
		fence_rep.seqno = fence->seqno;
		vmw_update_seqno(dev_priv, &dev_priv->fifo);
		fence_rep.passed_seqno = dev_priv->last_read_seqno;
	}

	/*
	 * copy_to_user errors will be detected by user space not
	 * seeing fence_rep::error filled in. Typically
	 * user-space would have pre-set that member to -EFAULT.
	 */
	ret = copy_to_user(user_fence_rep, &fence_rep,
			   sizeof(fence_rep));

	/*
	 * User-space lost the fence object. We need to sync
	 * and unreference the handle.
	 */
	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
		ttm_ref_object_base_unref(vmw_fp->tfile,
					  fence_handle, TTM_REF_USAGE);
		DRM_ERROR("Fence copy error. Syncing.\n");
		(void) vmw_fence_obj_wait(fence, fence->signal_mask,
					  false, false,
					  VMW_FENCE_WAIT_TIMEOUT);
	}
}
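/*
 * Added user-space sketch illustrating the -EFAULT convention described
 * above (hypothetical snippet, not part of the driver):
 *
 *	struct drm_vmw_fence_rep rep;
 *
 *	rep.error = -EFAULT;
 *	... pass &rep as the fence_rep pointer of the execbuf ioctl ...
 *	if (rep.error == -EFAULT)
 *		the kernel's copy_to_user() never landed;
 *
 * A failed copy is thus observable even though the kernel cannot report
 * it through the reply structure itself.
 */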
int vmw_execbuf_process(struct drm_file *file_priv,
			struct vmw_private *dev_priv,
			void __user *user_commands,
			void *kernel_commands,
			uint32_t command_size,
			uint64_t throttle_us,
			struct drm_vmw_fence_rep __user *user_fence_rep,
			struct vmw_fence_obj **out_fence)
{
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_resource *error_resource;
	struct list_head resource_list;
	struct ww_acquire_ctx ticket;
	uint32_t handle;
	void *cmd;
	int ret;

	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
	if (unlikely(ret != 0))
		return -ERESTARTSYS;

	if (kernel_commands == NULL) {
		sw_context->kernel = false;

		ret = vmw_resize_cmd_bounce(sw_context, command_size);
		if (unlikely(ret != 0))
			goto out_unlock;

		ret = copy_from_user(sw_context->cmd_bounce,
				     user_commands, command_size);

		if (unlikely(ret != 0)) {
			ret = -EFAULT;
			DRM_ERROR("Failed copying commands.\n");
			goto out_unlock;
		}
		kernel_commands = sw_context->cmd_bounce;
	} else
		sw_context->kernel = true;

	sw_context->fp = vmw_fpriv(file_priv);
	sw_context->cur_reloc = 0;
	sw_context->cur_val_buf = 0;
	sw_context->fence_flags = 0;
	INIT_LIST_HEAD(&sw_context->resource_list);
	sw_context->cur_query_bo = dev_priv->pinned_bo;
	sw_context->last_query_ctx = NULL;
	sw_context->needs_post_query_barrier = false;
	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
	INIT_LIST_HEAD(&sw_context->validate_nodes);
	INIT_LIST_HEAD(&sw_context->res_relocations);
	if (!sw_context->res_ht_initialized) {
		ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER);
		if (unlikely(ret != 0))
			goto out_unlock;
		sw_context->res_ht_initialized = true;
	}
	INIT_LIST_HEAD(&sw_context->staged_cmd_res);

	INIT_LIST_HEAD(&resource_list);
	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
				command_size);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_resources_reserve(sw_context);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_validate_buffers(dev_priv, sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_resources_validate(sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	if (throttle_us) {
		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
				   throttle_us);

		if (unlikely(ret != 0))
			goto out_err;
	}

	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_err;
	}

	if (dev_priv->has_mob) {
		ret = vmw_rebind_contexts(sw_context);
		if (unlikely(ret != 0))
			goto out_unlock_binding;
	}

	cmd = vmw_fifo_reserve(dev_priv, command_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving fifo space for commands.\n");
		ret = -ENOMEM;
		goto out_unlock_binding;
	}

	vmw_apply_relocations(sw_context);
	memcpy(cmd, kernel_commands, command_size);

	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);

	vmw_fifo_commit(dev_priv, command_size);

	vmw_query_bo_switch_commit(dev_priv, sw_context);
	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
					 &fence,
					 (user_fence_rep) ? &handle : NULL);
	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_fifo_send_fence will sync. The error will be propagated to
	 * user-space in @fence_rep
	 */

	if (ret != 0)
		DRM_ERROR("Fence submission error. Syncing.\n");

	vmw_resource_list_unreserve(&sw_context->resource_list, false);
	mutex_unlock(&dev_priv->binding_mutex);

	ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
				    (void *) fence);

	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, fence);

	vmw_clear_validations(sw_context);
	vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
				    user_fence_rep, fence, handle);

	/* Don't unreference when handing fence out */
	if (unlikely(out_fence != NULL)) {
		*out_fence = fence;
		fence = NULL;
	} else if (likely(fence != NULL)) {
		vmw_fence_obj_unreference(&fence);
	}

	list_splice_init(&sw_context->resource_list, &resource_list);
	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to
	 * avoid deadlocks in resource destruction paths.
	 */
	vmw_resource_list_unreference(&resource_list);

	return 0;

out_unlock_binding:
	mutex_unlock(&dev_priv->binding_mutex);
out_err:
	ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
out_err_nores:
	vmw_resource_list_unreserve(&sw_context->resource_list, true);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_free_relocations(sw_context);
	vmw_clear_validations(sw_context);
	if (unlikely(dev_priv->pinned_bo != NULL &&
		     !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
	list_splice_init(&sw_context->resource_list, &resource_list);
	error_resource = sw_context->error_resource;
	sw_context->error_resource = NULL;
	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to
	 * avoid deadlocks in resource destruction paths.
	 */
	vmw_resource_list_unreference(&resource_list);
	if (unlikely(error_resource != NULL))
		vmw_resource_unreference(&error_resource);

	return ret;
}
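/*
 * Usage sketch (added; illustrative only): kernel-internal callers such
 * as the kms code submit an already-built command buffer by passing
 * kernel_commands directly, which sets sw_context->kernel and thereby
 * permits privileged commands:
 *
 *	struct vmw_fence_obj *fence = NULL;
 *	int ret;
 *
 *	ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd, size,
 *				  0, NULL, &fence);
 *	if (ret == 0 && fence != NULL)
 *		vmw_fence_obj_unreference(&fence);
 */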
/**
 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
 *
 * @dev_priv: The device private structure.
 *
 * This function is called to idle the fifo and unpin the query buffer
 * if the normal way to do this hits an error, which should typically be
 * extremely rare.
 */
static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
{
	DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");

	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
	vmw_bo_pin(dev_priv->pinned_bo, false);
	vmw_bo_pin(dev_priv->dummy_query_bo, false);
	dev_priv->dummy_query_bo_pinned = false;
}
/**
 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 * @fence: If non-NULL should point to a struct vmw_fence_obj issued
 * _after_ a query barrier that flushes all queries touching the current
 * buffer pointed to by @dev_priv->pinned_bo
 *
 * This function should be used to unpin the pinned query bo, or
 * as a query barrier when we need to make sure that all queries have
 * finished before the next fifo command. (For example on hardware
 * context destructions where the hardware may otherwise leak unfinished
 * queries).
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 *
 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread
 * before calling this function.
 */
void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
				     struct vmw_fence_obj *fence)
{
	int ret = 0;
	struct list_head validate_list;
	struct ttm_validate_buffer pinned_val, query_val;
	struct vmw_fence_obj *lfence = NULL;
	struct ww_acquire_ctx ticket;

	if (dev_priv->pinned_bo == NULL)
		goto out_unlock;

	INIT_LIST_HEAD(&validate_list);

	pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
	list_add_tail(&pinned_val.head, &validate_list);

	query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
	list_add_tail(&query_val.head, &validate_list);

	do {
		ret = ttm_eu_reserve_buffers(&ticket, &validate_list);
	} while (ret == -ERESTARTSYS);

	if (unlikely(ret != 0)) {
		vmw_execbuf_unpin_panic(dev_priv);
		goto out_no_reserve;
	}

	if (dev_priv->query_cid_valid) {
		BUG_ON(fence != NULL);
		ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
		if (unlikely(ret != 0)) {
			vmw_execbuf_unpin_panic(dev_priv);
			goto out_no_emit;
		}
		dev_priv->query_cid_valid = false;
	}

	vmw_bo_pin(dev_priv->pinned_bo, false);
	vmw_bo_pin(dev_priv->dummy_query_bo, false);
	dev_priv->dummy_query_bo_pinned = false;

	if (fence == NULL) {
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
						  NULL);
		fence = lfence;
	}

	ttm_eu_fence_buffer_objects(&ticket, &validate_list, (void *) fence);
	if (lfence != NULL)
		vmw_fence_obj_unreference(&lfence);

	ttm_bo_unref(&query_val.bo);
	ttm_bo_unref(&pinned_val.bo);
	ttm_bo_unref(&dev_priv->pinned_bo);

out_unlock:
	return;

out_no_emit:
	ttm_eu_backoff_reservation(&ticket, &validate_list);
out_no_reserve:
	ttm_bo_unref(&query_val.bo);
	ttm_bo_unref(&pinned_val.bo);
	ttm_bo_unref(&dev_priv->pinned_bo);
}
/**
 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
 * query bo.
 *
 * @dev_priv: The device private structure.
 *
 * This function should be used to unpin the pinned query bo, or
 * as a query barrier when we need to make sure that all queries have
 * finished before the next fifo command. (For example on hardware
 * context destructions where the hardware may otherwise leak unfinished
 * queries).
 *
 * This function does not return any failure codes, but makes attempts
 * to do safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will
 * thus not finish until that barrier has executed.
 */
void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->cmdbuf_mutex);
	if (dev_priv->query_cid_valid)
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
	mutex_unlock(&dev_priv->cmdbuf_mutex);
}
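/*
 * Added note: this wrapper exists for callers that do not already hold
 * cmdbuf_mutex; __vmw_execbuf_release_pinned_bo() above must only be
 * called with that mutex held.
 */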
int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
	int ret;

	/*
	 * This will allow us to extend the ioctl argument while
	 * maintaining backwards compatibility:
	 * We take different code paths depending on the value of
	 * arg->version.
	 */

	if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
		DRM_ERROR("Incorrect execbuf version.\n");
		DRM_ERROR("You're running outdated experimental "
			  "vmwgfx user-space drivers.\n");
		return -EINVAL;
	}

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_execbuf_process(file_priv, dev_priv,
				  (void __user *)(unsigned long)arg->commands,
				  NULL, arg->command_size, arg->throttle_us,
				  (void __user *)(unsigned long)arg->fence_rep,
				  NULL);

	if (unlikely(ret != 0))
		goto out_unlock;

	vmw_kms_cursor_post_execbuf(dev_priv);

out_unlock:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}
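/*
 * User-space sketch (added; illustrative, with field names following
 * include/uapi/drm/vmwgfx_drm.h): submitting a command buffer through
 * this ioctl with libdrm looks roughly like
 *
 *	struct drm_vmw_execbuf_arg arg;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.commands = (unsigned long) cmd_buf;
 *	arg.command_size = cmd_size;
 *	arg.throttle_us = 0;
 *	arg.fence_rep = (unsigned long) &fence_rep;
 *	arg.version = DRM_VMW_EXECBUF_VERSION;
 *	drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));
 *
 * where fence_rep.error has been preloaded with -EFAULT as described at
 * vmw_execbuf_copy_fence_user().
 */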