1 /**************************************************************************
3 * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
28 #include "vmwgfx_drv.h"
29 #include "vmwgfx_resource_priv.h"
30 #include "ttm/ttm_placement.h"
32 #define VMW_COMPAT_SHADER_HT_ORDER 12
35 struct vmw_resource res
;
36 SVGA3dShaderType type
;
40 struct vmw_user_shader
{
41 struct ttm_base_object base
;
42 struct vmw_shader shader
;
/**
 * enum vmw_compat_shader_state - Staging state for compat shaders
 *
 * Values are those used by the commit/revert/remove paths below:
 * VMW_COMPAT_COMMITED - present in the manager's hash table and list.
 * VMW_COMPAT_ADD      - staged for addition on a caller's list.
 * VMW_COMPAT_DEL      - staged for removal on a caller's list.
 */
enum vmw_compat_shader_state {
	VMW_COMPAT_COMMITED,
	VMW_COMPAT_ADD,
	VMW_COMPAT_DEL
};
55 * struct vmw_compat_shader - Metadata for compat shaders.
57 * @handle: The TTM handle of the guest backed shader.
58 * @tfile: The struct ttm_object_file the guest backed shader is registered
60 * @hash: Hash item for lookup.
61 * @head: List head for staging lists or the compat shader manager list.
62 * @state: Staging state.
64 * The structure is protected by the cmdbuf lock.
66 struct vmw_compat_shader
{
68 struct ttm_object_file
*tfile
;
69 struct drm_hash_item hash
;
70 struct list_head head
;
71 enum vmw_compat_shader_state state
;
75 * struct vmw_compat_shader_manager - Compat shader manager.
77 * @shaders: Hash table containing staged and commited compat shaders
78 * @list: List of commited shaders.
79 * @dev_priv: Pointer to a device private structure.
81 * @shaders and @list are protected by the cmdbuf mutex for now.
83 struct vmw_compat_shader_manager
{
84 struct drm_open_hash shaders
;
85 struct list_head list
;
86 struct vmw_private
*dev_priv
;
89 static void vmw_user_shader_free(struct vmw_resource
*res
);
90 static struct vmw_resource
*
91 vmw_user_shader_base_to_res(struct ttm_base_object
*base
);
93 static int vmw_gb_shader_create(struct vmw_resource
*res
);
94 static int vmw_gb_shader_bind(struct vmw_resource
*res
,
95 struct ttm_validate_buffer
*val_buf
);
96 static int vmw_gb_shader_unbind(struct vmw_resource
*res
,
98 struct ttm_validate_buffer
*val_buf
);
99 static int vmw_gb_shader_destroy(struct vmw_resource
*res
);
101 static uint64_t vmw_user_shader_size
;
103 static const struct vmw_user_resource_conv user_shader_conv
= {
104 .object_type
= VMW_RES_SHADER
,
105 .base_obj_to_res
= vmw_user_shader_base_to_res
,
106 .res_free
= vmw_user_shader_free
109 const struct vmw_user_resource_conv
*user_shader_converter
=
113 static const struct vmw_res_func vmw_gb_shader_func
= {
114 .res_type
= vmw_res_shader
,
115 .needs_backup
= true,
117 .type_name
= "guest backed shaders",
118 .backup_placement
= &vmw_mob_placement
,
119 .create
= vmw_gb_shader_create
,
120 .destroy
= vmw_gb_shader_destroy
,
121 .bind
= vmw_gb_shader_bind
,
122 .unbind
= vmw_gb_shader_unbind
129 static inline struct vmw_shader
*
130 vmw_res_to_shader(struct vmw_resource
*res
)
132 return container_of(res
, struct vmw_shader
, res
);
/*
 * vmw_hw_shader_destroy - Resource destructor callback; tears down the
 * hardware shader. The return value of the destroy op is ignored here.
 */
static void vmw_hw_shader_destroy(struct vmw_resource *res)
{
	(void) vmw_gb_shader_destroy(res);
}
140 static int vmw_gb_shader_init(struct vmw_private
*dev_priv
,
141 struct vmw_resource
*res
,
144 SVGA3dShaderType type
,
145 struct vmw_dma_buffer
*byte_code
,
146 void (*res_free
) (struct vmw_resource
*res
))
148 struct vmw_shader
*shader
= vmw_res_to_shader(res
);
151 ret
= vmw_resource_init(dev_priv
, res
, true,
152 res_free
, &vmw_gb_shader_func
);
155 if (unlikely(ret
!= 0)) {
163 res
->backup_size
= size
;
165 res
->backup
= vmw_dmabuf_reference(byte_code
);
166 res
->backup_offset
= offset
;
171 vmw_resource_activate(res
, vmw_hw_shader_destroy
);
175 static int vmw_gb_shader_create(struct vmw_resource
*res
)
177 struct vmw_private
*dev_priv
= res
->dev_priv
;
178 struct vmw_shader
*shader
= vmw_res_to_shader(res
);
181 SVGA3dCmdHeader header
;
182 SVGA3dCmdDefineGBShader body
;
185 if (likely(res
->id
!= -1))
188 ret
= vmw_resource_alloc_id(res
);
189 if (unlikely(ret
!= 0)) {
190 DRM_ERROR("Failed to allocate a shader id.\n");
194 if (unlikely(res
->id
>= VMWGFX_NUM_GB_SHADER
)) {
199 cmd
= vmw_fifo_reserve(dev_priv
, sizeof(*cmd
));
200 if (unlikely(cmd
== NULL
)) {
201 DRM_ERROR("Failed reserving FIFO space for shader "
207 cmd
->header
.id
= SVGA_3D_CMD_DEFINE_GB_SHADER
;
208 cmd
->header
.size
= sizeof(cmd
->body
);
209 cmd
->body
.shid
= res
->id
;
210 cmd
->body
.type
= shader
->type
;
211 cmd
->body
.sizeInBytes
= shader
->size
;
212 vmw_fifo_commit(dev_priv
, sizeof(*cmd
));
213 (void) vmw_3d_resource_inc(dev_priv
, false);
218 vmw_resource_release_id(res
);
223 static int vmw_gb_shader_bind(struct vmw_resource
*res
,
224 struct ttm_validate_buffer
*val_buf
)
226 struct vmw_private
*dev_priv
= res
->dev_priv
;
228 SVGA3dCmdHeader header
;
229 SVGA3dCmdBindGBShader body
;
231 struct ttm_buffer_object
*bo
= val_buf
->bo
;
233 BUG_ON(bo
->mem
.mem_type
!= VMW_PL_MOB
);
235 cmd
= vmw_fifo_reserve(dev_priv
, sizeof(*cmd
));
236 if (unlikely(cmd
== NULL
)) {
237 DRM_ERROR("Failed reserving FIFO space for shader "
242 cmd
->header
.id
= SVGA_3D_CMD_BIND_GB_SHADER
;
243 cmd
->header
.size
= sizeof(cmd
->body
);
244 cmd
->body
.shid
= res
->id
;
245 cmd
->body
.mobid
= bo
->mem
.start
;
246 cmd
->body
.offsetInBytes
= 0;
247 res
->backup_dirty
= false;
248 vmw_fifo_commit(dev_priv
, sizeof(*cmd
));
253 static int vmw_gb_shader_unbind(struct vmw_resource
*res
,
255 struct ttm_validate_buffer
*val_buf
)
257 struct vmw_private
*dev_priv
= res
->dev_priv
;
259 SVGA3dCmdHeader header
;
260 SVGA3dCmdBindGBShader body
;
262 struct vmw_fence_obj
*fence
;
264 BUG_ON(res
->backup
->base
.mem
.mem_type
!= VMW_PL_MOB
);
266 cmd
= vmw_fifo_reserve(dev_priv
, sizeof(*cmd
));
267 if (unlikely(cmd
== NULL
)) {
268 DRM_ERROR("Failed reserving FIFO space for shader "
273 cmd
->header
.id
= SVGA_3D_CMD_BIND_GB_SHADER
;
274 cmd
->header
.size
= sizeof(cmd
->body
);
275 cmd
->body
.shid
= res
->id
;
276 cmd
->body
.mobid
= SVGA3D_INVALID_ID
;
277 cmd
->body
.offsetInBytes
= 0;
278 vmw_fifo_commit(dev_priv
, sizeof(*cmd
));
281 * Create a fence object and fence the backup buffer.
284 (void) vmw_execbuf_fence_commands(NULL
, dev_priv
,
287 vmw_fence_single_bo(val_buf
->bo
, fence
);
289 if (likely(fence
!= NULL
))
290 vmw_fence_obj_unreference(&fence
);
295 static int vmw_gb_shader_destroy(struct vmw_resource
*res
)
297 struct vmw_private
*dev_priv
= res
->dev_priv
;
299 SVGA3dCmdHeader header
;
300 SVGA3dCmdDestroyGBShader body
;
303 if (likely(res
->id
== -1))
306 mutex_lock(&dev_priv
->binding_mutex
);
307 vmw_context_binding_res_list_scrub(&res
->binding_head
);
309 cmd
= vmw_fifo_reserve(dev_priv
, sizeof(*cmd
));
310 if (unlikely(cmd
== NULL
)) {
311 DRM_ERROR("Failed reserving FIFO space for shader "
313 mutex_unlock(&dev_priv
->binding_mutex
);
317 cmd
->header
.id
= SVGA_3D_CMD_DESTROY_GB_SHADER
;
318 cmd
->header
.size
= sizeof(cmd
->body
);
319 cmd
->body
.shid
= res
->id
;
320 vmw_fifo_commit(dev_priv
, sizeof(*cmd
));
321 mutex_unlock(&dev_priv
->binding_mutex
);
322 vmw_resource_release_id(res
);
323 vmw_3d_resource_dec(dev_priv
, false);
329 * User-space shader management:
332 static struct vmw_resource
*
333 vmw_user_shader_base_to_res(struct ttm_base_object
*base
)
335 return &(container_of(base
, struct vmw_user_shader
, base
)->
339 static void vmw_user_shader_free(struct vmw_resource
*res
)
341 struct vmw_user_shader
*ushader
=
342 container_of(res
, struct vmw_user_shader
, shader
.res
);
343 struct vmw_private
*dev_priv
= res
->dev_priv
;
345 ttm_base_object_kfree(ushader
, base
);
346 ttm_mem_global_free(vmw_mem_glob(dev_priv
),
347 vmw_user_shader_size
);
351 * This function is called when user space has no more references on the
352 * base object. It releases the base-object's reference on the resource object.
355 static void vmw_user_shader_base_release(struct ttm_base_object
**p_base
)
357 struct ttm_base_object
*base
= *p_base
;
358 struct vmw_resource
*res
= vmw_user_shader_base_to_res(base
);
361 vmw_resource_unreference(&res
);
364 int vmw_shader_destroy_ioctl(struct drm_device
*dev
, void *data
,
365 struct drm_file
*file_priv
)
367 struct drm_vmw_shader_arg
*arg
= (struct drm_vmw_shader_arg
*)data
;
368 struct ttm_object_file
*tfile
= vmw_fpriv(file_priv
)->tfile
;
370 return ttm_ref_object_base_unref(tfile
, arg
->handle
,
374 static int vmw_shader_alloc(struct vmw_private
*dev_priv
,
375 struct vmw_dma_buffer
*buffer
,
378 SVGA3dShaderType shader_type
,
379 struct ttm_object_file
*tfile
,
382 struct vmw_user_shader
*ushader
;
383 struct vmw_resource
*res
, *tmp
;
387 * Approximate idr memory usage with 128 bytes. It will be limited
388 * by maximum number_of shaders anyway.
390 if (unlikely(vmw_user_shader_size
== 0))
391 vmw_user_shader_size
=
392 ttm_round_pot(sizeof(struct vmw_user_shader
)) + 128;
394 ret
= ttm_mem_global_alloc(vmw_mem_glob(dev_priv
),
395 vmw_user_shader_size
,
397 if (unlikely(ret
!= 0)) {
398 if (ret
!= -ERESTARTSYS
)
399 DRM_ERROR("Out of graphics memory for shader "
404 ushader
= kzalloc(sizeof(*ushader
), GFP_KERNEL
);
405 if (unlikely(ushader
== NULL
)) {
406 ttm_mem_global_free(vmw_mem_glob(dev_priv
),
407 vmw_user_shader_size
);
412 res
= &ushader
->shader
.res
;
413 ushader
->base
.shareable
= false;
414 ushader
->base
.tfile
= NULL
;
417 * From here on, the destructor takes over resource freeing.
420 ret
= vmw_gb_shader_init(dev_priv
, res
, shader_size
,
421 offset
, shader_type
, buffer
,
422 vmw_user_shader_free
);
423 if (unlikely(ret
!= 0))
426 tmp
= vmw_resource_reference(res
);
427 ret
= ttm_base_object_init(tfile
, &ushader
->base
, false,
429 &vmw_user_shader_base_release
, NULL
);
431 if (unlikely(ret
!= 0)) {
432 vmw_resource_unreference(&tmp
);
437 *handle
= ushader
->base
.hash
.key
;
439 vmw_resource_unreference(&res
);
445 int vmw_shader_define_ioctl(struct drm_device
*dev
, void *data
,
446 struct drm_file
*file_priv
)
448 struct vmw_private
*dev_priv
= vmw_priv(dev
);
449 struct drm_vmw_shader_create_arg
*arg
=
450 (struct drm_vmw_shader_create_arg
*)data
;
451 struct ttm_object_file
*tfile
= vmw_fpriv(file_priv
)->tfile
;
452 struct vmw_dma_buffer
*buffer
= NULL
;
453 SVGA3dShaderType shader_type
;
456 if (arg
->buffer_handle
!= SVGA3D_INVALID_ID
) {
457 ret
= vmw_user_dmabuf_lookup(tfile
, arg
->buffer_handle
,
459 if (unlikely(ret
!= 0)) {
460 DRM_ERROR("Could not find buffer for shader "
465 if ((u64
)buffer
->base
.num_pages
* PAGE_SIZE
<
466 (u64
)arg
->size
+ (u64
)arg
->offset
) {
467 DRM_ERROR("Illegal buffer- or shader size.\n");
473 switch (arg
->shader_type
) {
474 case drm_vmw_shader_type_vs
:
475 shader_type
= SVGA3D_SHADERTYPE_VS
;
477 case drm_vmw_shader_type_ps
:
478 shader_type
= SVGA3D_SHADERTYPE_PS
;
480 case drm_vmw_shader_type_gs
:
481 shader_type
= SVGA3D_SHADERTYPE_GS
;
484 DRM_ERROR("Illegal shader type.\n");
489 ret
= ttm_read_lock(&dev_priv
->reservation_sem
, true);
490 if (unlikely(ret
!= 0))
493 ret
= vmw_shader_alloc(dev_priv
, buffer
, arg
->size
, arg
->offset
,
494 shader_type
, tfile
, &arg
->shader_handle
);
496 ttm_read_unlock(&dev_priv
->reservation_sem
);
498 vmw_dmabuf_unreference(&buffer
);
503 * vmw_compat_shader_lookup - Look up a compat shader
505 * @man: Pointer to the compat shader manager.
506 * @shader_type: The shader type, that combined with the user_key identifies
508 * @user_key: On entry, this should be a pointer to the user_key.
509 * On successful exit, it will contain the guest-backed shader's TTM handle.
511 * Returns 0 on success. Non-zero on failure, in which case the value pointed
512 * to by @user_key is unmodified.
514 int vmw_compat_shader_lookup(struct vmw_compat_shader_manager
*man
,
515 SVGA3dShaderType shader_type
,
518 struct drm_hash_item
*hash
;
520 unsigned long key
= *user_key
| (shader_type
<< 24);
522 ret
= drm_ht_find_item(&man
->shaders
, key
, &hash
);
523 if (unlikely(ret
!= 0))
526 *user_key
= drm_hash_entry(hash
, struct vmw_compat_shader
,
533 * vmw_compat_shader_free - Free a compat shader.
535 * @man: Pointer to the compat shader manager.
536 * @entry: Pointer to a struct vmw_compat_shader.
538 * Frees a struct vmw_compat_shder entry and drops its reference to the
539 * guest backed shader.
541 static void vmw_compat_shader_free(struct vmw_compat_shader_manager
*man
,
542 struct vmw_compat_shader
*entry
)
544 list_del(&entry
->head
);
545 WARN_ON(drm_ht_remove_item(&man
->shaders
, &entry
->hash
));
546 WARN_ON(ttm_ref_object_base_unref(entry
->tfile
, entry
->handle
,
552 * vmw_compat_shaders_commit - Commit a list of compat shader actions.
554 * @man: Pointer to the compat shader manager.
555 * @list: Caller's list of compat shader actions.
557 * This function commits a list of compat shader additions or removals.
558 * It is typically called when the execbuf ioctl call triggering these
559 * actions has commited the fifo contents to the device.
561 void vmw_compat_shaders_commit(struct vmw_compat_shader_manager
*man
,
562 struct list_head
*list
)
564 struct vmw_compat_shader
*entry
, *next
;
566 list_for_each_entry_safe(entry
, next
, list
, head
) {
567 list_del(&entry
->head
);
568 switch (entry
->state
) {
570 entry
->state
= VMW_COMPAT_COMMITED
;
571 list_add_tail(&entry
->head
, &man
->list
);
574 ttm_ref_object_base_unref(entry
->tfile
, entry
->handle
,
586 * vmw_compat_shaders_revert - Revert a list of compat shader actions
588 * @man: Pointer to the compat shader manager.
589 * @list: Caller's list of compat shader actions.
591 * This function reverts a list of compat shader additions or removals.
592 * It is typically called when the execbuf ioctl call triggering these
593 * actions failed for some reason, and the command stream was never
596 void vmw_compat_shaders_revert(struct vmw_compat_shader_manager
*man
,
597 struct list_head
*list
)
599 struct vmw_compat_shader
*entry
, *next
;
602 list_for_each_entry_safe(entry
, next
, list
, head
) {
603 switch (entry
->state
) {
605 vmw_compat_shader_free(man
, entry
);
608 ret
= drm_ht_insert_item(&man
->shaders
, &entry
->hash
);
609 list_del(&entry
->head
);
610 list_add_tail(&entry
->head
, &man
->list
);
611 entry
->state
= VMW_COMPAT_COMMITED
;
621 * vmw_compat_shader_remove - Stage a compat shader for removal.
623 * @man: Pointer to the compat shader manager
624 * @user_key: The key that is used to identify the shader. The key is
625 * unique to the shader type.
626 * @shader_type: Shader type.
627 * @list: Caller's list of staged shader actions.
629 * This function stages a compat shader for removal and removes the key from
630 * the shader manager's hash table. If the shader was previously only staged
631 * for addition it is completely removed (But the execbuf code may keep a
632 * reference if it was bound to a context between addition and removal). If
633 * it was previously commited to the manager, it is staged for removal.
635 int vmw_compat_shader_remove(struct vmw_compat_shader_manager
*man
,
636 u32 user_key
, SVGA3dShaderType shader_type
,
637 struct list_head
*list
)
639 struct vmw_compat_shader
*entry
;
640 struct drm_hash_item
*hash
;
643 ret
= drm_ht_find_item(&man
->shaders
, user_key
| (shader_type
<< 24),
645 if (likely(ret
!= 0))
648 entry
= drm_hash_entry(hash
, struct vmw_compat_shader
, hash
);
650 switch (entry
->state
) {
652 vmw_compat_shader_free(man
, entry
);
654 case VMW_COMPAT_COMMITED
:
655 (void) drm_ht_remove_item(&man
->shaders
, &entry
->hash
);
656 list_del(&entry
->head
);
657 entry
->state
= VMW_COMPAT_DEL
;
658 list_add_tail(&entry
->head
, list
);
669 * vmw_compat_shader_add - Create a compat shader and add the
672 * @man: Pointer to the compat shader manager
673 * @user_key: The key that is used to identify the shader. The key is
674 * unique to the shader type.
675 * @bytecode: Pointer to the bytecode of the shader.
676 * @shader_type: Shader type.
677 * @tfile: Pointer to a struct ttm_object_file that the guest-backed shader is
678 * to be created with.
679 * @list: Caller's list of staged shader actions.
681 * Note that only the key is added to the shader manager's hash table.
682 * The shader is not yet added to the shader manager's list of shaders.
684 int vmw_compat_shader_add(struct vmw_compat_shader_manager
*man
,
685 u32 user_key
, const void *bytecode
,
686 SVGA3dShaderType shader_type
,
688 struct ttm_object_file
*tfile
,
689 struct list_head
*list
)
691 struct vmw_dma_buffer
*buf
;
692 struct ttm_bo_kmap_obj map
;
694 struct vmw_compat_shader
*compat
;
698 if (user_key
> ((1 << 24) - 1) || (unsigned) shader_type
> 16)
701 /* Allocate and pin a DMA buffer */
702 buf
= kzalloc(sizeof(*buf
), GFP_KERNEL
);
703 if (unlikely(buf
== NULL
))
706 ret
= vmw_dmabuf_init(man
->dev_priv
, buf
, size
, &vmw_sys_ne_placement
,
707 true, vmw_dmabuf_bo_free
);
708 if (unlikely(ret
!= 0))
711 ret
= ttm_bo_reserve(&buf
->base
, false, true, false, NULL
);
712 if (unlikely(ret
!= 0))
715 /* Map and copy shader bytecode. */
716 ret
= ttm_bo_kmap(&buf
->base
, 0, PAGE_ALIGN(size
) >> PAGE_SHIFT
,
718 if (unlikely(ret
!= 0)) {
719 ttm_bo_unreserve(&buf
->base
);
723 memcpy(ttm_kmap_obj_virtual(&map
, &is_iomem
), bytecode
, size
);
727 ret
= ttm_bo_validate(&buf
->base
, &vmw_sys_placement
, false, true);
729 ttm_bo_unreserve(&buf
->base
);
731 /* Create a guest-backed shader container backed by the dma buffer */
732 ret
= vmw_shader_alloc(man
->dev_priv
, buf
, size
, 0, shader_type
,
734 vmw_dmabuf_unreference(&buf
);
735 if (unlikely(ret
!= 0))
738 * Create a compat shader structure and stage it for insertion
741 compat
= kzalloc(sizeof(*compat
), GFP_KERNEL
);
745 compat
->hash
.key
= user_key
| (shader_type
<< 24);
746 ret
= drm_ht_insert_item(&man
->shaders
, &compat
->hash
);
747 if (unlikely(ret
!= 0))
748 goto out_invalid_key
;
750 compat
->state
= VMW_COMPAT_ADD
;
751 compat
->handle
= handle
;
752 compat
->tfile
= tfile
;
753 list_add_tail(&compat
->head
, list
);
760 ttm_ref_object_base_unref(tfile
, handle
, TTM_REF_USAGE
);
767 * vmw_compat_shader_man_create - Create a compat shader manager
769 * @dev_priv: Pointer to a device private structure.
771 * Typically done at file open time. If successful returns a pointer to a
772 * compat shader manager. Otherwise returns an error pointer.
774 struct vmw_compat_shader_manager
*
775 vmw_compat_shader_man_create(struct vmw_private
*dev_priv
)
777 struct vmw_compat_shader_manager
*man
;
780 man
= kzalloc(sizeof(*man
), GFP_KERNEL
);
782 return ERR_PTR(-ENOMEM
);
784 man
->dev_priv
= dev_priv
;
785 INIT_LIST_HEAD(&man
->list
);
786 ret
= drm_ht_create(&man
->shaders
, VMW_COMPAT_SHADER_HT_ORDER
);
795 * vmw_compat_shader_man_destroy - Destroy a compat shader manager
797 * @man: Pointer to the shader manager to destroy.
799 * Typically done at file close time.
801 void vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager
*man
)
803 struct vmw_compat_shader
*entry
, *next
;
805 mutex_lock(&man
->dev_priv
->cmdbuf_mutex
);
806 list_for_each_entry_safe(entry
, next
, &man
->list
, head
)
807 vmw_compat_shader_free(man
, entry
);
809 mutex_unlock(&man
->dev_priv
->cmdbuf_mutex
);