drivers/gpu/drm/vmwgfx/vmwgfx_context.c
/**************************************************************************
 *
 * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include "ttm/ttm_placement.h"

struct vmw_user_context {
	struct ttm_base_object base;
	struct vmw_resource res;
	struct vmw_ctx_binding_state cbs;
	struct vmw_cmdbuf_res_manager *man;
};


typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);

static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base);

static int vmw_gb_context_create(struct vmw_resource *res);
static int vmw_gb_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_destroy(struct vmw_resource *res);
static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
					   bool rebind);
static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs);
static uint64_t vmw_user_context_size;

static const struct vmw_user_resource_conv user_context_conv = {
	.object_type = VMW_RES_CONTEXT,
	.base_obj_to_res = vmw_user_context_base_to_res,
	.res_free = vmw_user_context_free
};

const struct vmw_user_resource_conv *user_context_converter =
	&user_context_conv;


static const struct vmw_res_func vmw_legacy_context_func = {
	.res_type = vmw_res_context,
	.needs_backup = false,
	.may_evict = false,
	.type_name = "legacy contexts",
	.backup_placement = NULL,
	.create = NULL,
	.destroy = NULL,
	.bind = NULL,
	.unbind = NULL
};

static const struct vmw_res_func vmw_gb_context_func = {
	.res_type = vmw_res_context,
	.needs_backup = true,
	.may_evict = true,
	.type_name = "guest backed contexts",
	.backup_placement = &vmw_mob_placement,
	.create = vmw_gb_context_create,
	.destroy = vmw_gb_context_destroy,
	.bind = vmw_gb_context_bind,
	.unbind = vmw_gb_context_unbind
};

static const vmw_scrub_func vmw_scrub_funcs[vmw_ctx_binding_max] = {
	[vmw_ctx_binding_shader] = vmw_context_scrub_shader,
	[vmw_ctx_binding_rt] = vmw_context_scrub_render_target,
	[vmw_ctx_binding_tex] = vmw_context_scrub_texture };

/**
 * Context management:
 */

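/**
 * vmw_hw_context_destroy - Destroy a hardware context.
 *
 * @res: Pointer to the context resource.
 *
 * Destroys the device context backing @res. For guest-backed contexts the
 * binding state is killed and the context destroyed under the command buffer
 * and binding mutexes. For legacy contexts a SVGA_3D_CMD_CONTEXT_DESTROY
 * command is written to the FIFO.
 */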
static void vmw_hw_context_destroy(struct vmw_resource *res)
{
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyContext body;
	} *cmd;


	if (res->func->destroy == vmw_gb_context_destroy) {
		mutex_lock(&dev_priv->cmdbuf_mutex);
		vmw_cmdbuf_res_man_destroy(uctx->man);
		mutex_lock(&dev_priv->binding_mutex);
		(void) vmw_context_binding_state_kill(&uctx->cbs);
		(void) vmw_gb_context_destroy(res);
		mutex_unlock(&dev_priv->binding_mutex);
		if (dev_priv->pinned_bo != NULL &&
		    !dev_priv->query_cid_valid)
			__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
		mutex_unlock(&dev_priv->cmdbuf_mutex);
		return;
	}

	vmw_execbuf_release_pinned_bo(dev_priv);
	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_3d_resource_dec(dev_priv, false);
}

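/**
 * vmw_gb_context_init - Initialize a guest-backed context resource.
 *
 * @dev_priv: Pointer to the device private structure.
 * @res: The context resource to initialize.
 * @res_free: Destructor called when the resource is freed, or NULL to use
 *            kfree.
 *
 * Sets up the resource, its backup size and, if the device has MOB support,
 * the per-context command buffer resource manager. On error the resource is
 * freed using @res_free (or kfree).
 */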
static int vmw_gb_context_init(struct vmw_private *dev_priv,
			       struct vmw_resource *res,
			       void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	ret = vmw_resource_init(dev_priv, res, true,
				res_free, &vmw_gb_context_func);
	res->backup_size = SVGA3D_CONTEXT_DATA_SIZE;
	if (unlikely(ret != 0))
		goto out_err;

	if (dev_priv->has_mob) {
		uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
		if (unlikely(IS_ERR(uctx->man))) {
			ret = PTR_ERR(uctx->man);
			uctx->man = NULL;
			goto out_err;
		}
	}

	memset(&uctx->cbs, 0, sizeof(uctx->cbs));
	INIT_LIST_HEAD(&uctx->cbs.list);

	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;

out_err:
	if (res_free)
		res_free(res);
	else
		kfree(res);
	return ret;
}

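/**
 * vmw_context_init - Initialize a context resource.
 *
 * @dev_priv: Pointer to the device private structure.
 * @res: The context resource to initialize.
 * @res_free: Destructor called when the resource is freed, or NULL to use
 *            kfree.
 *
 * Dispatches to vmw_gb_context_init() on guest-backed devices. Otherwise a
 * legacy context id is allocated and a SVGA_3D_CMD_CONTEXT_DEFINE command
 * is written to the FIFO.
 */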
static int vmw_context_init(struct vmw_private *dev_priv,
			    struct vmw_resource *res,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineContext body;
	} *cmd;

	if (dev_priv->has_mob)
		return vmw_gb_context_init(dev_priv, res, res_free);

	ret = vmw_resource_init(dev_priv, res, false,
				res_free, &vmw_legacy_context_func);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a resource id.\n");
		goto out_early;
	}

	if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
		DRM_ERROR("Out of hw context ids.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	(void) vmw_3d_resource_inc(dev_priv, false);
	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;

out_early:
	if (res_free == NULL)
		kfree(res);
	else
		res_free(res);
	return ret;
}

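/**
 * vmw_context_alloc - Allocate and initialize a kernel-side context.
 *
 * @dev_priv: Pointer to the device private structure.
 *
 * Returns a pointer to the new context resource, or NULL on failure.
 */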
struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
{
	struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
	int ret;

	if (unlikely(res == NULL))
		return NULL;

	ret = vmw_context_init(dev_priv, res, NULL);

	return (ret == 0) ? res : NULL;
}


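/**
 * vmw_gb_context_create - Create a guest-backed context on the device.
 *
 * @res: The context resource.
 *
 * Allocates a context id and issues a SVGA_3D_CMD_DEFINE_GB_CONTEXT
 * command. A no-op if the context already has a valid id.
 */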
static int vmw_gb_context_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBContext body;
	} *cmd;

	if (likely(res->id != -1))
		return 0;

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a context id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "creation.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	(void) vmw_3d_resource_inc(dev_priv, false);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}

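/**
 * vmw_gb_context_bind - Bind a guest-backed context to its backup MOB.
 *
 * @res: The context resource.
 * @val_buf: Validation buffer pointing at the backup buffer object.
 *
 * Issues a SVGA_3D_CMD_BIND_GB_CONTEXT command, transferring any dirty
 * backup contents to the device.
 */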
static int vmw_gb_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBContext body;
	} *cmd;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "binding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	cmd->body.mobid = bo->mem.start;
	cmd->body.validContents = res->backup_dirty;
	res->backup_dirty = false;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

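/**
 * vmw_gb_context_unbind - Unbind a guest-backed context from its backup MOB.
 *
 * @res: The context resource.
 * @readback: Whether to issue a readback command before unbinding.
 * @val_buf: Validation buffer pointing at the backup buffer object.
 *
 * Scrubs all bindings, optionally reads the context state back into the
 * backup buffer, unbinds the MOB and fences the backup buffer.
 */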
static int vmw_gb_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBContext body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBContext body;
	} *cmd2;
	uint32_t submit_size;
	uint8_t *cmd;


	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);

	mutex_lock(&dev_priv->binding_mutex);
	vmw_context_binding_state_scrub(&uctx->cbs);

	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "unbinding.\n");
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd2 = (void *) cmd;
	if (readback) {
		cmd1 = (void *) cmd;
		cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT;
		cmd1->header.size = sizeof(cmd1->body);
		cmd1->body.cid = res->id;
		cmd2 = (void *) (&cmd1[1]);
	}
	cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
	cmd2->header.size = sizeof(cmd2->body);
	cmd2->body.cid = res->id;
	cmd2->body.mobid = SVGA3D_INVALID_ID;

	vmw_fifo_commit(dev_priv, submit_size);
	mutex_unlock(&dev_priv->binding_mutex);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_fence_single_bo(bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}

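/**
 * vmw_gb_context_destroy - Destroy a guest-backed context on the device.
 *
 * @res: The context resource.
 *
 * Issues a SVGA_3D_CMD_DESTROY_GB_CONTEXT command and releases the context
 * id. A no-op if the context has no valid id.
 */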
static int vmw_gb_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBContext body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	if (dev_priv->query_cid == res->id)
		dev_priv->query_cid_valid = false;
	vmw_resource_release_id(res);
	vmw_3d_resource_dec(dev_priv, false);

	return 0;
}

/**
 * User-space context management:
 */

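/**
 * vmw_user_context_base_to_res - Return the resource embedded in a user
 * context, given its TTM base object.
 *
 * @base: Pointer to the TTM base object.
 */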
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base)
{
	return &(container_of(base, struct vmw_user_context, base)->res);
}

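/**
 * vmw_user_context_free - Free a user context and its accounted memory.
 *
 * @res: The embedded resource of the user context to free.
 */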
static void vmw_user_context_free(struct vmw_resource *res)
{
	struct vmw_user_context *ctx =
		container_of(res, struct vmw_user_context, res);
	struct vmw_private *dev_priv = res->dev_priv;

	ttm_base_object_kfree(ctx, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv),
			    vmw_user_context_size);
}

/**
 * vmw_user_context_base_release - Release a base object's resource reference.
 *
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_context *ctx =
		container_of(base, struct vmw_user_context, base);
	struct vmw_resource *res = &ctx->res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

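/**
 * vmw_context_destroy_ioctl - Ioctl entry point for destroying a user-space
 * context handle.
 *
 * @dev: Pointer to the drm device.
 * @data: Pointer to a struct drm_vmw_context_arg identifying the context.
 * @file_priv: Pointer to the calling file private.
 */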
int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE);
}

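/**
 * vmw_context_define_ioctl - Ioctl entry point for creating a user-space
 * context and returning its handle.
 *
 * @dev: Pointer to the drm device.
 * @data: Pointer to a struct drm_vmw_context_arg receiving the handle.
 * @file_priv: Pointer to the calling file private.
 */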
int vmw_context_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_context *ctx;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;


	/*
	 * Approximate idr memory usage with 128 bytes. It will be limited
	 * by the maximum number of contexts anyway.
	 */

	if (unlikely(vmw_user_context_size == 0))
		vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128 +
		  ((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0);

	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   vmw_user_context_size,
				   false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for context"
				  " creation.\n");
		goto out_unlock;
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (unlikely(ctx == NULL)) {
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_user_context_size);
		ret = -ENOMEM;
		goto out_unlock;
	}

	res = &ctx->res;
	ctx->base.shareable = false;
	ctx->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */

	ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
	if (unlikely(ret != 0))
		goto out_unlock;

	tmp = vmw_resource_reference(&ctx->res);
	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
				   &vmw_user_context_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->cid = ctx->base.hash.key;
out_err:
	vmw_resource_unreference(&res);
out_unlock:
	ttm_read_unlock(&dev_priv->reservation_sem);
	return ret;

}


/**
 * vmw_context_scrub_shader - scrub a shader binding from a context.
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 */
static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
{
	struct vmw_private *dev_priv = bi->ctx->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetShader body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for shader "
			  "unbinding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_SET_SHADER;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = bi->ctx->id;
	cmd->body.type = bi->i1.shader_type;
	cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_context_scrub_render_target - scrub a render target binding
 * from a context.
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 */
static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
					   bool rebind)
{
	struct vmw_private *dev_priv = bi->ctx->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetRenderTarget body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for render target "
			  "unbinding.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = bi->ctx->id;
	cmd->body.type = bi->i1.rt_type;
	cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
	cmd->body.target.face = 0;
	cmd->body.target.mipmap = 0;
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_context_scrub_texture - scrub a texture binding from a context.
 *
 * @bi: single binding information.
 * @rebind: Whether to issue a bind instead of scrub command.
 *
 * TODO: Possibly complement this function with a function that takes
 * a list of texture bindings and combines them to a single command.
 */
static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi,
				     bool rebind)
{
	struct vmw_private *dev_priv = bi->ctx->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		struct {
			SVGA3dCmdSetTextureState c;
			SVGA3dTextureState s1;
		} body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for texture "
			  "unbinding.\n");
		return -ENOMEM;
	}


	cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.c.cid = bi->ctx->id;
	cmd->body.s1.stage = bi->i1.texture_stage;
	cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
	cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_context_binding_drop: Stop tracking a context binding
 *
 * @cb: Pointer to binding tracker storage.
 *
 * Stops tracking a context binding, and re-initializes its storage.
 * Typically used when the context binding is replaced with a binding to
 * another (or the same, for that matter) resource.
 */
static void vmw_context_binding_drop(struct vmw_ctx_binding *cb)
{
	list_del(&cb->ctx_list);
	if (!list_empty(&cb->res_list))
		list_del(&cb->res_list);
	cb->bi.ctx = NULL;
}

/**
 * vmw_context_binding_add: Start tracking a context binding
 *
 * @cbs: Pointer to the context binding state tracker.
 * @bi: Information about the binding to track.
 *
 * Performs basic checks on the binding to make sure arguments are within
 * bounds and then starts tracking the binding in the context binding
 * state structure @cbs.
 */
int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
			    const struct vmw_ctx_bindinfo *bi)
{
	struct vmw_ctx_binding *loc;

	switch (bi->bt) {
	case vmw_ctx_binding_rt:
		if (unlikely((unsigned)bi->i1.rt_type >= SVGA3D_RT_MAX)) {
			DRM_ERROR("Illegal render target type %u.\n",
				  (unsigned) bi->i1.rt_type);
			return -EINVAL;
		}
		loc = &cbs->render_targets[bi->i1.rt_type];
		break;
	case vmw_ctx_binding_tex:
		if (unlikely((unsigned)bi->i1.texture_stage >=
			     SVGA3D_NUM_TEXTURE_UNITS)) {
			DRM_ERROR("Illegal texture/sampler unit %u.\n",
				  (unsigned) bi->i1.texture_stage);
			return -EINVAL;
		}
		loc = &cbs->texture_units[bi->i1.texture_stage];
		break;
	case vmw_ctx_binding_shader:
		if (unlikely((unsigned)bi->i1.shader_type >=
			     SVGA3D_SHADERTYPE_MAX)) {
			DRM_ERROR("Illegal shader type %u.\n",
				  (unsigned) bi->i1.shader_type);
			return -EINVAL;
		}
		loc = &cbs->shaders[bi->i1.shader_type];
		break;
	default:
		BUG();
	}

	if (loc->bi.ctx != NULL)
		vmw_context_binding_drop(loc);

	loc->bi = *bi;
	loc->bi.scrubbed = false;
	list_add_tail(&loc->ctx_list, &cbs->list);
	INIT_LIST_HEAD(&loc->res_list);

	return 0;
}

/**
 * vmw_context_binding_transfer: Transfer a context binding tracking entry.
 *
 * @cbs: Pointer to the persistent context binding state tracker.
 * @bi: Information about the binding to track.
 *
 */
static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
					 const struct vmw_ctx_bindinfo *bi)
{
	struct vmw_ctx_binding *loc;

	switch (bi->bt) {
	case vmw_ctx_binding_rt:
		loc = &cbs->render_targets[bi->i1.rt_type];
		break;
	case vmw_ctx_binding_tex:
		loc = &cbs->texture_units[bi->i1.texture_stage];
		break;
	case vmw_ctx_binding_shader:
		loc = &cbs->shaders[bi->i1.shader_type];
		break;
	default:
		BUG();
	}

	if (loc->bi.ctx != NULL)
		vmw_context_binding_drop(loc);

	if (bi->res != NULL) {
		loc->bi = *bi;
		list_add_tail(&loc->ctx_list, &cbs->list);
		list_add_tail(&loc->res_list, &bi->res->binding_head);
	}
}

/**
 * vmw_context_binding_kill - Kill a binding on the device
 * and stop tracking it.
 *
 * @cb: Pointer to binding tracker storage.
 *
 * Emits FIFO commands to scrub a binding represented by @cb.
 * Then stops tracking the binding and re-initializes its storage.
 */
static void vmw_context_binding_kill(struct vmw_ctx_binding *cb)
{
	if (!cb->bi.scrubbed) {
		(void) vmw_scrub_funcs[cb->bi.bt](&cb->bi, false);
		cb->bi.scrubbed = true;
	}
	vmw_context_binding_drop(cb);
}

/**
 * vmw_context_binding_state_kill - Kill all bindings associated with a
 * struct vmw_ctx_binding state structure, and re-initialize the structure.
 *
 * @cbs: Pointer to the context binding state tracker.
 *
 * Emits commands to scrub all bindings associated with the
 * context binding state tracker. Then re-initializes the whole structure.
 */
static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs)
{
	struct vmw_ctx_binding *entry, *next;

	list_for_each_entry_safe(entry, next, &cbs->list, ctx_list)
		vmw_context_binding_kill(entry);
}

/**
 * vmw_context_binding_state_scrub - Scrub all bindings associated with a
 * struct vmw_ctx_binding state structure.
 *
 * @cbs: Pointer to the context binding state tracker.
 *
 * Emits commands to scrub all bindings associated with the
 * context binding state tracker.
 */
static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
{
	struct vmw_ctx_binding *entry;

	list_for_each_entry(entry, &cbs->list, ctx_list) {
		if (!entry->bi.scrubbed) {
			(void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
			entry->bi.scrubbed = true;
		}
	}
}

/**
 * vmw_context_binding_res_list_kill - Kill all bindings on a
 * resource binding list
 *
 * @head: list head of resource binding list
 *
 * Kills all bindings associated with a specific resource. Typically
 * called before the resource is destroyed.
 */
void vmw_context_binding_res_list_kill(struct list_head *head)
{
	struct vmw_ctx_binding *entry, *next;

	list_for_each_entry_safe(entry, next, head, res_list)
		vmw_context_binding_kill(entry);
}

/**
 * vmw_context_binding_res_list_scrub - Scrub all bindings on a
 * resource binding list
 *
 * @head: list head of resource binding list
 *
 * Scrub all bindings associated with a specific resource. Typically
 * called before the resource is evicted.
 */
void vmw_context_binding_res_list_scrub(struct list_head *head)
{
	struct vmw_ctx_binding *entry;

	list_for_each_entry(entry, head, res_list) {
		if (!entry->bi.scrubbed) {
			(void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
			entry->bi.scrubbed = true;
		}
	}
}

/**
 * vmw_context_binding_state_transfer - Commit staged binding info
 *
 * @ctx: Pointer to context to commit the staged binding info to.
 * @from: Staged binding info built during execbuf.
 *
 * Transfers binding info from a temporary structure to the persistent
 * structure in the context. This can be done once the commands referencing
 * the staged bindings have been submitted to the device.
 */
void vmw_context_binding_state_transfer(struct vmw_resource *ctx,
					struct vmw_ctx_binding_state *from)
{
	struct vmw_user_context *uctx =
		container_of(ctx, struct vmw_user_context, res);
	struct vmw_ctx_binding *entry, *next;

	list_for_each_entry_safe(entry, next, &from->list, ctx_list)
		vmw_context_binding_transfer(&uctx->cbs, &entry->bi);
}

/**
 * vmw_context_rebind_all - Rebind all scrubbed bindings of a context
 *
 * @ctx: The context resource
 *
 * Walks through the context binding list and rebinds all scrubbed
 * resources.
 */
int vmw_context_rebind_all(struct vmw_resource *ctx)
{
	struct vmw_ctx_binding *entry;
	struct vmw_user_context *uctx =
		container_of(ctx, struct vmw_user_context, res);
	struct vmw_ctx_binding_state *cbs = &uctx->cbs;
	int ret;

	list_for_each_entry(entry, &cbs->list, ctx_list) {
		if (likely(!entry->bi.scrubbed))
			continue;

		if (WARN_ON(entry->bi.res == NULL || entry->bi.res->id ==
			    SVGA3D_INVALID_ID))
			continue;

		ret = vmw_scrub_funcs[entry->bi.bt](&entry->bi, true);
		if (unlikely(ret != 0))
			return ret;

		entry->bi.scrubbed = false;
	}

	return 0;
}

/**
 * vmw_context_binding_list - Return a list of context bindings
 *
 * @ctx: The context resource
 *
 * Returns the current list of bindings of the given context. Note that
 * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
{
	return &(container_of(ctx, struct vmw_user_context, res)->cbs.list);
}

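/**
 * vmw_context_res_man - Return the command buffer resource manager of a
 * context.
 *
 * @ctx: The context resource.
 */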
struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
{
	return container_of(ctx, struct vmw_user_context, res)->man;
}