drm/vmwgfx: Fix query buffer locking order violation
drivers/gpu/drm/vmwgfx/vmwgfx_context.c
/**************************************************************************
 *
 * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "ttm/ttm_placement.h"

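/**
 * struct vmw_user_context - user-space handle to a device context
 *
 * @base: TTM base object used to look the context up from user-space.
 * @res: The context as a managed resource.
 * @cbs: Persistent tracker for the context's shader, render-target and
 * texture bindings.
 */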
struct vmw_user_context {
	struct ttm_base_object base;
	struct vmw_resource res;
	struct vmw_ctx_binding_state cbs;
};

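/*
 * A scrub function either unbinds a resource from the device
 * (@rebind == false) or re-issues the bind command for a previously
 * scrubbed binding (@rebind == true).
 */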
typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);

static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base);

static int vmw_gb_context_create(struct vmw_resource *res);
static int vmw_gb_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_destroy(struct vmw_resource *res);
static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
					   bool rebind);
static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs);
static uint64_t vmw_user_context_size;

static const struct vmw_user_resource_conv user_context_conv = {
	.object_type = VMW_RES_CONTEXT,
	.base_obj_to_res = vmw_user_context_base_to_res,
	.res_free = vmw_user_context_free
};

const struct vmw_user_resource_conv *user_context_converter =
	&user_context_conv;


static const struct vmw_res_func vmw_legacy_context_func = {
	.res_type = vmw_res_context,
	.needs_backup = false,
	.may_evict = false,
	.type_name = "legacy contexts",
	.backup_placement = NULL,
	.create = NULL,
	.destroy = NULL,
	.bind = NULL,
	.unbind = NULL
};

static const struct vmw_res_func vmw_gb_context_func = {
	.res_type = vmw_res_context,
	.needs_backup = true,
	.may_evict = true,
	.type_name = "guest backed contexts",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_gb_context_create,
	.destroy = vmw_gb_context_destroy,
	.bind = vmw_gb_context_bind,
	.unbind = vmw_gb_context_unbind
};

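/* Scrub/rebind handlers, indexed by binding type. */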
static const vmw_scrub_func vmw_scrub_funcs[vmw_ctx_binding_max] = {
	[vmw_ctx_binding_shader] = vmw_context_scrub_shader,
	[vmw_ctx_binding_rt] = vmw_context_scrub_render_target,
	[vmw_ctx_binding_tex] = vmw_context_scrub_texture };

/**
 * Context management:
 */

static void vmw_hw_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyContext body;
	} *cmd;

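	/*
	 * Locking order: cmdbuf_mutex is taken before binding_mutex, and
	 * the pinned query buffer, if its context id is no longer valid,
	 * is released while cmdbuf_mutex is still held.
	 */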
	if (res->func->destroy == vmw_gb_context_destroy) {
		mutex_lock(&dev_priv->cmdbuf_mutex);
		mutex_lock(&dev_priv->binding_mutex);
		(void) vmw_context_binding_state_kill
			(&container_of(res, struct vmw_user_context, res)->cbs);
		(void) vmw_gb_context_destroy(res);
		mutex_unlock(&dev_priv->binding_mutex);
		if (dev_priv->pinned_bo != NULL &&
		    !dev_priv->query_cid_valid)
			__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
		mutex_unlock(&dev_priv->cmdbuf_mutex);
		return;
	}

	vmw_execbuf_release_pinned_bo(dev_priv);
	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_3d_resource_dec(dev_priv, false);
}

static int vmw_gb_context_init(struct vmw_private *dev_priv,
			       struct vmw_resource *res,
			       void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	ret = vmw_resource_init(dev_priv, res, true,
				res_free, &vmw_gb_context_func);
	res->backup_size = SVGA3D_CONTEXT_DATA_SIZE;

	if (unlikely(ret != 0)) {
		if (res_free)
			res_free(res);
		else
			kfree(res);
		return ret;
	}

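	/* Start with an empty context binding state tracker. */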
	memset(&uctx->cbs, 0, sizeof(uctx->cbs));
	INIT_LIST_HEAD(&uctx->cbs.list);

	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;
}

static int vmw_context_init(struct vmw_private *dev_priv,
			    struct vmw_resource *res,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineContext body;
	} *cmd;

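	/* Use a guest-backed context when the device supports MOBs. */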
	if (dev_priv->has_mob)
		return vmw_gb_context_init(dev_priv, res, res_free);

	ret = vmw_resource_init(dev_priv, res, false,
				res_free, &vmw_legacy_context_func);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a resource id.\n");
		goto out_early;
	}

	if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
		DRM_ERROR("Out of hw context ids.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	(void) vmw_3d_resource_inc(dev_priv, false);
	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;

out_early:
	if (res_free == NULL)
		kfree(res);
	else
		res_free(res);
	return ret;
}

struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
{
	struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
	int ret;

	if (unlikely(res == NULL))
		return NULL;

	ret = vmw_context_init(dev_priv, res, NULL);

	return (ret == 0) ? res : NULL;
}

static int vmw_gb_context_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBContext body;
	} *cmd;

	if (likely(res->id != -1))
		return 0;

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a context id.\n");
		goto out_no_id;
	}

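	/* The device exposes only a fixed number of guest-backed contexts. */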
	if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "creation.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	(void) vmw_3d_resource_inc(dev_priv, false);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}

static int vmw_gb_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBContext body;
	} *cmd;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "binding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
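	/* For a buffer in MOB placement, bo->mem.start holds the MOB id. */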
	cmd->body.mobid = bo->mem.start;
	cmd->body.validContents = res->backup_dirty;
	res->backup_dirty = false;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

static int vmw_gb_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBContext body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBContext body;
	} *cmd2;
	uint32_t submit_size;
	uint8_t *cmd;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	mutex_lock(&dev_priv->binding_mutex);
	vmw_context_binding_state_scrub(&uctx->cbs);

	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "unbinding.\n");
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

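	/*
	 * Optionally read back the context state, then unbind the context
	 * from its mob by binding it to SVGA3D_INVALID_ID.
	 */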
	cmd2 = (void *) cmd;
	if (readback) {
		cmd1 = (void *) cmd;
		cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT;
		cmd1->header.size = sizeof(cmd1->body);
		cmd1->body.cid = res->id;
		cmd2 = (void *) (&cmd1[1]);
	}
	cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
	cmd2->header.size = sizeof(cmd2->body);
	cmd2->body.cid = res->id;
	cmd2->body.mobid = SVGA3D_INVALID_ID;

	vmw_fifo_commit(dev_priv, submit_size);
	mutex_unlock(&dev_priv->binding_mutex);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_fence_single_bo(bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

static int vmw_gb_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBContext body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
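	/* A cached query context id referring to this context is now stale. */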
	if (dev_priv->query_cid == res->id)
		dev_priv->query_cid_valid = false;
	vmw_resource_release_id(res);
	vmw_3d_resource_dec(dev_priv, false);

	return 0;
}

/**
 * User-space context management:
 */

static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base)
{
	return &(container_of(base, struct vmw_user_context, base)->res);
}

static void vmw_user_context_free(struct vmw_resource *res)
{
	struct vmw_user_context *ctx =
		container_of(res, struct vmw_user_context, res);
	struct vmw_private *dev_priv = res->dev_priv;

	ttm_base_object_kfree(ctx, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_context_size);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_context *ctx =
		container_of(base, struct vmw_user_context, base);
	struct vmw_resource *res = &ctx->res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
}

int vmw_context_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_context *ctx;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	/*
	 * Approximate idr memory usage with 128 bytes. It will be limited
	 * by the maximum number of contexts anyway.
	 */

	if (unlikely(vmw_user_context_size == 0))
		vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_context_size,
				   false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for context"
				  " creation.\n");
		goto out_unlock;
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (unlikely(ctx == NULL)) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_context_size);
		ret = -ENOMEM;
		goto out_unlock;
	}

	res = &ctx->res;
	ctx->base.shareable = false;
	ctx->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */

	ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
	if (unlikely(ret != 0))
		goto out_unlock;

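	/*
	 * The base object holds its own reference on the resource; it is
	 * dropped in vmw_user_context_base_release() when user-space
	 * releases the handle.
	 */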
	tmp = vmw_resource_reference(&ctx->res);
	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
				   &vmw_user_context_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->cid = ctx->base.hash.key;
out_err:
	vmw_resource_unreference(&res);
out_unlock:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;
}

/**
 * vmw_context_scrub_shader - scrub a shader binding from a context.
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of a scrub command.
 */
static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
{
	struct vmw_private *dev_priv = bi->ctx->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetShader body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for shader "
			  "unbinding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_SET_SHADER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = bi->ctx->id;
	cmd->body.type = bi->i1.shader_type;
	cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_context_scrub_render_target - scrub a render target binding
 * from a context.
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of a scrub command.
 */
static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
					   bool rebind)
{
	struct vmw_private *dev_priv = bi->ctx->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetRenderTarget body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for render target "
			  "unbinding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = bi->ctx->id;
	cmd->body.type = bi->i1.rt_type;
	cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
	cmd->body.target.face = 0;
	cmd->body.target.mipmap = 0;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_context_scrub_texture - scrub a texture binding from a context.
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of a scrub command.
 *
 * TODO: Possibly complement this function with a function that takes
 * a list of texture bindings and combines them to a single command.
 */
static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi,
				     bool rebind)
{
	struct vmw_private *dev_priv = bi->ctx->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		struct {
			SVGA3dCmdSetTextureState c;
			SVGA3dTextureState s1;
		} body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for texture "
			  "unbinding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.c.cid = bi->ctx->id;
	cmd->body.s1.stage = bi->i1.texture_stage;
	cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
	cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_context_binding_drop: Stop tracking a context binding
 *
 * @cb: Pointer to binding tracker storage.
 *
 * Stops tracking a context binding, and re-initializes its storage.
 * Typically used when the context binding is replaced with a binding to
 * another (or the same, for that matter) resource.
 */
static void vmw_context_binding_drop(struct vmw_ctx_binding *cb)
{
	list_del(&cb->ctx_list);
	if (!list_empty(&cb->res_list))
		list_del(&cb->res_list);
	cb->bi.ctx = NULL;
}

/**
 * vmw_context_binding_add: Start tracking a context binding
 *
 * @cbs: Pointer to the context binding state tracker.
 * @bi: Information about the binding to track.
 *
 * Performs basic checks on the binding to make sure arguments are within
 * bounds and then starts tracking the binding in the context binding
 * state structure @cbs.
 */
int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
			    const struct vmw_ctx_bindinfo *bi)
{
	struct vmw_ctx_binding *loc;

	switch (bi->bt) {
	case vmw_ctx_binding_rt:
		if (unlikely((unsigned)bi->i1.rt_type >= SVGA3D_RT_MAX)) {
			DRM_ERROR("Illegal render target type %u.\n",
				  (unsigned) bi->i1.rt_type);
			return -EINVAL;
		}
		loc = &cbs->render_targets[bi->i1.rt_type];
		break;
	case vmw_ctx_binding_tex:
		if (unlikely((unsigned)bi->i1.texture_stage >=
			     SVGA3D_NUM_TEXTURE_UNITS)) {
			DRM_ERROR("Illegal texture/sampler unit %u.\n",
				  (unsigned) bi->i1.texture_stage);
			return -EINVAL;
		}
		loc = &cbs->texture_units[bi->i1.texture_stage];
		break;
	case vmw_ctx_binding_shader:
		if (unlikely((unsigned)bi->i1.shader_type >=
			     SVGA3D_SHADERTYPE_MAX)) {
			DRM_ERROR("Illegal shader type %u.\n",
				  (unsigned) bi->i1.shader_type);
			return -EINVAL;
		}
		loc = &cbs->shaders[bi->i1.shader_type];
		break;
	default:
		BUG();
	}

	if (loc->bi.ctx != NULL)
		vmw_context_binding_drop(loc);

	loc->bi = *bi;
	loc->bi.scrubbed = false;
	list_add_tail(&loc->ctx_list, &cbs->list);
	INIT_LIST_HEAD(&loc->res_list);

	return 0;
}
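
/*
 * Example (illustrative sketch only; the surrounding variable names are
 * assumptions, not part of this file): staging a render-target binding
 * while validating a command stream.
 *
 *	struct vmw_ctx_bindinfo bi = {
 *		.ctx = ctx_res,
 *		.res = surface_res,
 *		.bt = vmw_ctx_binding_rt,
 *		.i1.rt_type = SVGA3D_RT_COLOR0,
 *	};
 *	ret = vmw_context_binding_add(staged_bindings, &bi);
 */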

/**
 * vmw_context_binding_transfer: Transfer a context binding tracking entry.
 *
 * @cbs: Pointer to the persistent context binding state tracker.
 * @bi: Information about the binding to track.
 *
 * Transfers a binding to the persistent tracker @cbs, replacing any
 * binding previously tracked at the same binding point. A NULL @bi->res
 * simply drops the old binding.
 */
static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
					 const struct vmw_ctx_bindinfo *bi)
{
	struct vmw_ctx_binding *loc;

	switch (bi->bt) {
	case vmw_ctx_binding_rt:
		loc = &cbs->render_targets[bi->i1.rt_type];
		break;
	case vmw_ctx_binding_tex:
		loc = &cbs->texture_units[bi->i1.texture_stage];
		break;
	case vmw_ctx_binding_shader:
		loc = &cbs->shaders[bi->i1.shader_type];
		break;
	default:
		BUG();
	}

	if (loc->bi.ctx != NULL)
		vmw_context_binding_drop(loc);

	if (bi->res != NULL) {
		loc->bi = *bi;
		list_add_tail(&loc->ctx_list, &cbs->list);
		list_add_tail(&loc->res_list, &bi->res->binding_head);
	}
}

/**
 * vmw_context_binding_kill - Kill a binding on the device
 * and stop tracking it.
 *
 * @cb: Pointer to binding tracker storage.
 *
 * Emits FIFO commands to scrub a binding represented by @cb.
 * Then stops tracking the binding and re-initializes its storage.
 */
static void vmw_context_binding_kill(struct vmw_ctx_binding *cb)
{
	if (!cb->bi.scrubbed) {
		(void) vmw_scrub_funcs[cb->bi.bt](&cb->bi, false);
		cb->bi.scrubbed = true;
	}
	vmw_context_binding_drop(cb);
}

/**
 * vmw_context_binding_state_kill - Kill all bindings associated with a
 * struct vmw_ctx_binding state structure, and re-initialize the structure.
 *
 * @cbs: Pointer to the context binding state tracker.
 *
 * Emits commands to scrub all bindings associated with the
 * context binding state tracker. Then re-initializes the whole structure.
 */
static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs)
{
	struct vmw_ctx_binding *entry, *next;

	list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
		vmw_context_binding_kill(entry);
}

/**
 * vmw_context_binding_state_scrub - Scrub all bindings associated with a
 * struct vmw_ctx_binding state structure.
 *
 * @cbs: Pointer to the context binding state tracker.
 *
 * Emits commands to scrub all bindings associated with the
 * context binding state tracker.
 */
static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
{
	struct vmw_ctx_binding *entry;

	list_for_each_entry(entry, &cbs->list, ctx_list) {
		if (!entry->bi.scrubbed) {
			(void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
			entry->bi.scrubbed = true;
		}
	}
}

/**
 * vmw_context_binding_res_list_kill - Kill all bindings on a
 * resource binding list
 *
 * @head: list head of resource binding list
 *
 * Kills all bindings associated with a specific resource. Typically
 * called before the resource is destroyed.
 */
void vmw_context_binding_res_list_kill(struct list_head *head)
{
	struct vmw_ctx_binding *entry, *next;

	list_for_each_entry_safe(entry, next, head, res_list)
		vmw_context_binding_kill(entry);
}

/**
 * vmw_context_binding_res_list_scrub - Scrub all bindings on a
 * resource binding list
 *
 * @head: list head of resource binding list
 *
 * Scrubs all bindings associated with a specific resource. Typically
 * called before the resource is evicted.
 */
void vmw_context_binding_res_list_scrub(struct list_head *head)
{
	struct vmw_ctx_binding *entry;

	list_for_each_entry(entry, head, res_list) {
		if (!entry->bi.scrubbed) {
			(void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
			entry->bi.scrubbed = true;
		}
	}
}

/**
 * vmw_context_binding_state_transfer - Commit staged binding info
 *
 * @ctx: Pointer to context to commit the staged binding info to.
 * @from: Staged binding info built during execbuf.
 *
 * Transfers binding info from a temporary structure to the persistent
 * structure in the context. This can be done once the commands that set
 * up the bindings have been submitted to the device.
 */
void vmw_context_binding_state_transfer(struct vmw_resource *ctx,
					struct vmw_ctx_binding_state *from)
{
	struct vmw_user_context *uctx =
		container_of(ctx, struct vmw_user_context, res);
	struct vmw_ctx_binding *entry, *next;

	list_for_each_entry_safe(entry, next, &from->list, ctx_list)
		vmw_context_binding_transfer(&uctx->cbs, &entry->bi);
}

/**
 * vmw_context_rebind_all - Rebind all scrubbed bindings of a context
 *
 * @ctx: The context resource
 *
 * Walks through the context binding list and rebinds all scrubbed
 * resources.
 */
int vmw_context_rebind_all(struct vmw_resource *ctx)
{
	struct vmw_ctx_binding *entry;
	struct vmw_user_context *uctx =
		container_of(ctx, struct vmw_user_context, res);
	struct vmw_ctx_binding_state *cbs = &uctx->cbs;
	int ret;

	list_for_each_entry(entry, &cbs->list, ctx_list) {
		if (likely(!entry->bi.scrubbed))
			continue;

		if (WARN_ON(entry->bi.res == NULL || entry->bi.res->id ==
			    SVGA3D_INVALID_ID))
			continue;

		ret = vmw_scrub_funcs[entry->bi.bt](&entry->bi, true);
		if (unlikely(ret != 0))
			return ret;

		entry->bi.scrubbed = false;
	}

	return 0;
}

/**
 * vmw_context_binding_list - Return a list of context bindings
 *
 * @ctx: The context resource
 *
 * Returns the current list of bindings of the given context. Note that
 * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
{
	return &(container_of(ctx, struct vmw_user_context, res)->cbs.list);
}