drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_drm.h"
#include "ttm/ttm_object.h"
#include "ttm/ttm_placement.h"
#include "drmP.h"

#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2

struct vmw_user_context {
	struct ttm_base_object base;
	struct vmw_resource res;
};

struct vmw_user_surface {
	struct ttm_base_object base;
	struct vmw_surface srf;
};

struct vmw_user_dma_buffer {
	struct ttm_base_object base;
	struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
	uint32_t handle;
	uint64_t map_handle;
};

struct vmw_stream {
	struct vmw_resource res;
	uint32_t stream_id;
};

struct vmw_user_stream {
	struct ttm_base_object base;
	struct vmw_stream stream;
};

static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

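/*
 * vmw_resource_release - Final kref release function for resources.
 *
 * Called with the resource_lock write-held (from vmw_resource_unreference());
 * the lock is temporarily dropped around the hw_destroy() and res_free()
 * callbacks, which may sleep, and retaken before returning to kref_put().
 */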
static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;

	idr_remove(res->idr, res->id);
	write_unlock(&dev_priv->resource_lock);

	if (likely(res->hw_destroy != NULL))
		res->hw_destroy(res);

	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	write_lock(&dev_priv->resource_lock);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;
	struct vmw_private *dev_priv = res->dev_priv;

	*p_res = NULL;
	write_lock(&dev_priv->resource_lock);
	kref_put(&res->kref, vmw_resource_release);
	write_unlock(&dev_priv->resource_lock);
}

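/*
 * vmw_resource_init - Initialize a resource and assign it a device-unique id.
 *
 * @res is given an initial reference, registered in @idr and typed as
 * @obj_type. The idr_pre_get()/idr_get_new_above() loop retries for as long
 * as the idr needs refilling; ids start at 1 so that 0 can mean "invalid".
 * The resource is not visible to vmw_resource_lookup() until
 * vmw_resource_activate() marks it available.
 */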
static int vmw_resource_init(struct vmw_private *dev_priv,
			     struct vmw_resource *res,
			     struct idr *idr,
			     enum ttm_object_type obj_type,
			     void (*res_free) (struct vmw_resource *res))
{
	int ret;

	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->res_type = obj_type;
	res->idr = idr;
	res->avail = false;
	res->dev_priv = dev_priv;

	do {
		if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
			return -ENOMEM;

		write_lock(&dev_priv->resource_lock);
		ret = idr_get_new_above(idr, res, 1, &res->id);
		write_unlock(&dev_priv->resource_lock);

	} while (ret == -EAGAIN);

	return ret;
}

/**
 * vmw_resource_activate
 *
 * @res: Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activation basically means that vmw_resource_lookup() will
 * find the resource.
 */

static void vmw_resource_activate(struct vmw_resource *res,
				  void (*hw_destroy) (struct vmw_resource *))
{
	struct vmw_private *dev_priv = res->dev_priv;

	write_lock(&dev_priv->resource_lock);
	res->avail = true;
	res->hw_destroy = hw_destroy;
	write_unlock(&dev_priv->resource_lock);
}

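/*
 * vmw_resource_lookup - Look up an activated resource by id.
 *
 * Returns a pointer with an added reference on success, or NULL if the id
 * is unknown or the resource has not yet been activated.
 */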
struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
					 struct idr *idr, int id)
{
	struct vmw_resource *res;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(idr, id);
	if (res && res->avail)
		kref_get(&res->kref);
	else
		res = NULL;
	read_unlock(&dev_priv->resource_lock);

	if (unlikely(res == NULL))
		return NULL;

	return res;
}

/**
 * Context management:
 */

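/*
 * vmw_hw_context_destroy - Tell the device to destroy a context.
 *
 * Reserves FIFO space for an SVGA_3D_CMD_CONTEXT_DESTROY command, fills
 * in the context id and commits the command to the device.
 */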
static void vmw_hw_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyContext body;
	} *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
}

static int vmw_context_init(struct vmw_private *dev_priv,
			    struct vmw_resource *res,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineContext body;
	} *cmd;

	ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
				VMW_RES_CONTEXT, res_free);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(res);
		else
			res_free(res);
		return ret;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;
}

struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
{
	struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
	int ret;

	if (unlikely(res == NULL))
		return NULL;

	ret = vmw_context_init(dev_priv, res, NULL);
	return (ret == 0) ? res : NULL;
}

/**
 * User-space context management:
 */

static void vmw_user_context_free(struct vmw_resource *res)
{
	struct vmw_user_context *ctx =
	    container_of(res, struct vmw_user_context, res);

	kfree(ctx);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_context *ctx =
	    container_of(base, struct vmw_user_context, base);
	struct vmw_resource *res = &ctx->res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_context *ctx;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_context_free) {
		ret = -EINVAL;
		goto out;
	}

	ctx = container_of(res, struct vmw_user_context, res);
	if (ctx->base.tfile != tfile && !ctx->base.shareable) {
		ret = -EPERM;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_context_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_context *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if (unlikely(ctx == NULL))
		return -ENOMEM;

	res = &ctx->res;
	ctx->base.shareable = false;
	ctx->base.tfile = NULL;

	ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(&ctx->res);
	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
				   &vmw_user_context_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->cid = res->id;
out_err:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_context_check(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      int id)
{
	struct vmw_resource *res;
	int ret = 0;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(&dev_priv->context_idr, id);
	if (res && res->avail) {
		struct vmw_user_context *ctx =
		    container_of(res, struct vmw_user_context, res);
		if (ctx->base.tfile != tfile && !ctx->base.shareable)
			ret = -EPERM;
	} else
		ret = -EINVAL;
	read_unlock(&dev_priv->resource_lock);

	return ret;
}

/**
 * Surface management.
 */

static void vmw_hw_surface_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroySurface body;
	} *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "destruction.\n");
		return;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DESTROY);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.sid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
}

void vmw_surface_res_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);

	kfree(srf->sizes);
	kfree(srf->snooper.image);
	kfree(srf);
}

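/*
 * vmw_surface_init - Register a surface with the device.
 *
 * The define command is variable-size: a header and an
 * SVGA3dCmdDefineSurface body followed by one SVGA3dSize per mip level,
 * so submit_size below covers the whole command while cmd_len is the
 * body length written into the command header.
 */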
int vmw_surface_init(struct vmw_private *dev_priv,
		     struct vmw_surface *srf,
		     void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineSurface body;
	} *cmd;
	SVGA3dSize *cmd_size;
	struct vmw_resource *res = &srf->res;
	struct drm_vmw_size *src_size;
	size_t submit_size;
	uint32_t cmd_len;
	int i;

	BUG_ON(res_free == NULL);
	ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
				VMW_RES_SURFACE, res_free);

	if (unlikely(ret != 0)) {
		res_free(res);
		return ret;
	}

	submit_size = sizeof(*cmd) + srf->num_sizes * sizeof(SVGA3dSize);
	cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);

	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed for create surface.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DEFINE);
	cmd->header.size = cpu_to_le32(cmd_len);
	cmd->body.sid = cpu_to_le32(res->id);
	cmd->body.surfaceFlags = cpu_to_le32(srf->flags);
	cmd->body.format = cpu_to_le32(srf->format);
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
		cmd->body.face[i].numMipLevels =
		    cpu_to_le32(srf->mip_levels[i]);
	}

	cmd += 1;
	cmd_size = (SVGA3dSize *) cmd;
	src_size = srf->sizes;

	for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
		cmd_size->width = cpu_to_le32(src_size->width);
		cmd_size->height = cpu_to_le32(src_size->height);
		cmd_size->depth = cpu_to_le32(src_size->depth);
	}

	vmw_fifo_commit(dev_priv, submit_size);
	vmw_resource_activate(res, vmw_hw_surface_destroy);
	return 0;
}

static void vmw_user_surface_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
	struct vmw_user_surface *user_srf =
	    container_of(srf, struct vmw_user_surface, srf);

	kfree(srf->sizes);
	kfree(srf->snooper.image);
	kfree(user_srf);
}

int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
				   struct ttm_object_file *tfile,
				   uint32_t handle, struct vmw_surface **out)
{
	struct vmw_resource *res;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct ttm_base_object *base;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_resource;

	user_srf = container_of(base, struct vmw_user_surface, base);
	srf = &user_srf->srf;
	res = &srf->res;

	read_lock(&dev_priv->resource_lock);

	if (!res->avail || res->res_free != &vmw_user_surface_free) {
		read_unlock(&dev_priv->resource_lock);
		goto out_bad_resource;
	}

	kref_get(&res->kref);
	read_unlock(&dev_priv->resource_lock);

	*out = srf;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}

static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_surface *user_srf =
	    container_of(base, struct vmw_user_surface, base);
	struct vmw_resource *res = &user_srf->srf.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
}

int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf =
	    kmalloc(sizeof(*user_srf), GFP_KERNEL);
	struct vmw_surface *srf;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	union drm_vmw_surface_create_arg *arg =
	    (union drm_vmw_surface_create_arg *)data;
	struct drm_vmw_surface_create_req *req = &arg->req;
	struct drm_vmw_surface_arg *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct drm_vmw_size __user *user_sizes;
	int ret;
	int i;

	if (unlikely(user_srf == NULL))
		return -ENOMEM;

	srf = &user_srf->srf;
	res = &srf->res;

	srf->flags = req->flags;
	srf->format = req->format;
	srf->scanout = req->scanout;
	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
	srf->num_sizes = 0;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		srf->num_sizes += srf->mip_levels[i];

	if (srf->num_sizes > DRM_VMW_MAX_SURFACE_FACES *
	    DRM_VMW_MAX_MIP_LEVELS) {
		ret = -EINVAL;
		goto out_err0;
	}

	srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
	if (unlikely(srf->sizes == NULL)) {
		ret = -ENOMEM;
		goto out_err0;
	}

	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    req->size_addr;

	ret = copy_from_user(srf->sizes, user_sizes,
			     srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		ret = -EFAULT;
		goto out_err1;
	}

	if (srf->scanout &&
	    srf->num_sizes == 1 &&
	    srf->sizes[0].width == 64 &&
	    srf->sizes[0].height == 64 &&
	    srf->format == SVGA3D_A8R8G8B8) {

		srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
		/* clear the image */
		if (srf->snooper.image) {
			memset(srf->snooper.image, 0x00, 64 * 64 * 4);
		} else {
			DRM_ERROR("Failed to allocate cursor_image\n");
			ret = -ENOMEM;
			goto out_err1;
		}
	} else {
		srf->snooper.image = NULL;
	}
	srf->snooper.crtc = NULL;

	user_srf->base.shareable = false;
	user_srf->base.tfile = NULL;

	/**
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
	 */

	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(&srf->res);
	ret = ttm_base_object_init(tfile, &user_srf->base,
				   req->shareable, VMW_RES_SURFACE,
				   &vmw_user_surface_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		return ret;
	}

	rep->sid = user_srf->base.hash.key;
	if (rep->sid == SVGA3D_INVALID_ID)
		DRM_ERROR("Created bad Surface ID.\n");

	vmw_resource_unreference(&res);
	return 0;
out_err1:
	kfree(srf->sizes);
out_err0:
	kfree(user_srf);
	return ret;
}

int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	union drm_vmw_surface_reference_arg *arg =
	    (union drm_vmw_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_surface_create_req *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct drm_vmw_size __user *user_sizes;
	struct ttm_base_object *base;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, req->sid);
	if (unlikely(base == NULL)) {
		DRM_ERROR("Could not find surface to reference.\n");
		return -EINVAL;
	}

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_resource;

	user_srf = container_of(base, struct vmw_user_surface, base);
	srf = &user_srf->srf;

	ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not add a reference to a surface.\n");
		goto out_no_reference;
	}

	rep->flags = srf->flags;
	rep->format = srf->format;
	memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    rep->size_addr;

	if (user_sizes)
		ret = copy_to_user(user_sizes, srf->sizes,
				   srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		DRM_ERROR("copy_to_user failed %p %u\n",
			  user_sizes, srf->num_sizes);
		ret = -EFAULT;
	}
out_bad_resource:
out_no_reference:
	ttm_base_object_unref(&base);

	return ret;
}

int vmw_surface_check(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      uint32_t handle, int *id)
{
	struct ttm_base_object *base;
	struct vmw_user_surface *user_srf;

	int ret = -EPERM;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_surface;

	user_srf = container_of(base, struct vmw_user_surface, base);
	*id = user_srf->srf.res.id;
	ret = 0;

out_bad_surface:
	/**
	 * FIXME: May deadlock here when called from the
	 * command parsing code.
	 */

	ttm_base_object_unref(&base);
	return ret;
}

/**
 * Buffer management.
 */

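/*
 * vmw_dmabuf_acc_size - Compute the TTM memory-accounting size of a
 * buffer object: a fixed per-bo overhead plus the page-pointer array,
 * rounded up to a whole page. The fixed part is computed once and cached.
 */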
static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
				  unsigned long num_pages)
{
	static size_t bo_user_size = ~0;

	size_t page_array_size =
	    (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;

	if (unlikely(bo_user_size == ~0)) {
		bo_user_size = glob->ttm_bo_extra_size +
		    ttm_round_pot(sizeof(struct vmw_dma_buffer));
	}

	return bo_user_size + page_array_size;
}

void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	struct ttm_bo_global *glob = bo->glob;
	struct vmw_private *dev_priv =
	    container_of(bo->bdev, struct vmw_private, bdev);

	if (vmw_bo->gmr_bound) {
		vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
		spin_lock(&glob->lru_lock);
		ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
		spin_unlock(&glob->lru_lock);
		vmw_bo->gmr_bound = false;
	}
}

void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	struct ttm_bo_global *glob = bo->glob;

	vmw_dmabuf_gmr_unbind(bo);
	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
	kfree(vmw_bo);
}

int vmw_dmabuf_init(struct vmw_private *dev_priv,
		    struct vmw_dma_buffer *vmw_bo,
		    size_t size, struct ttm_placement *placement,
		    bool interruptible,
		    void (*bo_free) (struct ttm_buffer_object *bo))
{
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	size_t acc_size;
	int ret;

	BUG_ON(!bo_free);

	acc_size =
	    vmw_dmabuf_acc_size(bdev->glob,
				(size + PAGE_SIZE - 1) >> PAGE_SHIFT);

	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (unlikely(ret != 0)) {
		/* We must free the bo here, since ttm_bo_init()
		 * does so on error as well. */
		bo_free(&vmw_bo->base);
		return ret;
	}

	memset(vmw_bo, 0, sizeof(*vmw_bo));

	INIT_LIST_HEAD(&vmw_bo->gmr_lru);
	INIT_LIST_HEAD(&vmw_bo->validate_list);
	vmw_bo->gmr_id = 0;
	vmw_bo->gmr_bound = false;

	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
			  ttm_bo_type_device, placement,
			  0, 0, interruptible,
			  NULL, acc_size, bo_free);
	return ret;
}

static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
	struct ttm_bo_global *glob = bo->glob;

	vmw_dmabuf_gmr_unbind(bo);
	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
	kfree(vmw_user_bo);
}

static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base = *p_base;
	struct ttm_buffer_object *bo;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
	bo = &vmw_user_bo->dma.base;
	ttm_bo_unref(&bo);
}

int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
	    (union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_buffer_object *tmp;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
	if (unlikely(vmw_user_bo == NULL))
		return -ENOMEM;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0)) {
		kfree(vmw_user_bo);
		return ret;
	}

	ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
			      &vmw_vram_sys_placement, true,
			      &vmw_user_dmabuf_destroy);
	if (unlikely(ret != 0))
		goto out_unlock;	/* Don't return with the read lock held. */

	tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
	ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
				   &vmw_user_bo->base,
				   false,
				   ttm_buffer_type,
				   &vmw_user_dmabuf_release, NULL);
	if (unlikely(ret != 0)) {
		ttm_bo_unref(&tmp);
		/*
		 * Also drop the creation reference; the base object was
		 * never initialized, so vmw_user_dmabuf_release() will
		 * not drop it for us.
		 */
		tmp = &vmw_user_bo->dma.base;
		ttm_bo_unref(&tmp);
		goto out_unlock;
	}

	rep->handle = vmw_user_bo->base.hash.key;
	rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
	rep->cur_gmr_id = vmw_user_bo->base.hash.key;
	rep->cur_gmr_offset = 0;

	ttm_bo_unref(&tmp);
out_unlock:
	ttm_read_unlock(&vmaster->lock);

	return ret;
}

int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

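/*
 * vmw_dmabuf_validate_node - Return a validate-list index for a buffer.
 *
 * Assigns @cur_validate_node and flags the buffer as listed the first
 * time it is seen; subsequent calls return the cached index, so a buffer
 * appears on the validate list only once.
 */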
uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
				  uint32_t cur_validate_node)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	if (likely(vmw_bo->on_validate_list))
		return vmw_bo->cur_validate_node;

	vmw_bo->cur_validate_node = cur_validate_node;
	vmw_bo->on_validate_list = true;

	return cur_validate_node;
}

void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	vmw_bo->on_validate_list = false;
}

uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo;

	if (bo->mem.mem_type == TTM_PL_VRAM)
		return SVGA_GMR_FRAMEBUFFER;

	vmw_bo = vmw_dma_buffer(bo);

	return (vmw_bo->gmr_bound) ? vmw_bo->gmr_id : SVGA_GMR_NULL;
}

void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	vmw_bo->gmr_bound = true;
	vmw_bo->gmr_id = id;
}

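/*
 * vmw_user_dmabuf_lookup - Resolve a user-space handle to a referenced
 * vmw_dma_buffer. On success a buffer-object reference is handed to the
 * caller through @out; the temporary base-object reference taken by the
 * lookup is dropped before returning.
 */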
int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
			   uint32_t handle, struct vmw_dma_buffer **out)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -ESRCH;
	}

	if (unlikely(base->object_type != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -EINVAL;
	}

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
	ttm_base_object_unref(&base);
	*out = &vmw_user_bo->dma;

	return 0;
}

/**
 * TODO: Implement a gmr id eviction mechanism. Currently we just fail
 * when we're out of ids, causing GMR space to be allocated
 * out of VRAM.
 */

int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id)
{
	struct ttm_bo_global *glob = dev_priv->bdev.glob;
	int id;
	int ret;

	do {
		if (unlikely(ida_pre_get(&dev_priv->gmr_ida, GFP_KERNEL) == 0))
			return -ENOMEM;

		spin_lock(&glob->lru_lock);
		ret = ida_get_new(&dev_priv->gmr_ida, &id);
		spin_unlock(&glob->lru_lock);
	} while (ret == -EAGAIN);

	if (unlikely(ret != 0))
		return ret;

	if (unlikely(id >= dev_priv->max_gmr_ids)) {
		spin_lock(&glob->lru_lock);
		ida_remove(&dev_priv->gmr_ida, id);
		spin_unlock(&glob->lru_lock);
		return -EBUSY;
	}

	*p_id = (uint32_t) id;
	return 0;
}

/*
 * Stream management
 */

static void vmw_stream_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_stream *stream;
	int ret;

	DRM_INFO("%s: unref\n", __func__);
	stream = container_of(res, struct vmw_stream, res);

	ret = vmw_overlay_unref(dev_priv, stream->stream_id);
	WARN_ON(ret != 0);
}

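/*
 * vmw_stream_init - Register the stream resource and claim an overlay
 * stream id from the device. On failure the stream is freed (or handed
 * to @res_free); on success the resource is activated with
 * vmw_stream_destroy() as its destroy hook.
 */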
static int vmw_stream_init(struct vmw_private *dev_priv,
			   struct vmw_stream *stream,
			   void (*res_free) (struct vmw_resource *res))
{
	struct vmw_resource *res = &stream->res;
	int ret;

	ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
				VMW_RES_STREAM, res_free);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(stream);
		else
			res_free(&stream->res);
		return ret;
	}

	ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
	if (ret) {
		vmw_resource_unreference(&res);
		return ret;
	}

	DRM_INFO("%s: claimed\n", __func__);

	vmw_resource_activate(&stream->res, vmw_stream_destroy);
	return 0;
}

/**
 * User-space stream management:
 */

static void vmw_user_stream_free(struct vmw_resource *res)
{
	struct vmw_user_stream *stream =
	    container_of(res, struct vmw_user_stream, stream.res);

	kfree(stream);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_stream *stream =
	    container_of(base, struct vmw_user_stream, base);
	struct vmw_resource *res = &stream->stream.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_stream *stream;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr,
				  arg->stream_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto out;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EINVAL;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_stream *stream = kmalloc(sizeof(*stream), GFP_KERNEL);
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if (unlikely(stream == NULL))
		return -ENOMEM;

	res = &stream->stream.res;
	stream->base.shareable = false;
	stream->base.tfile = NULL;

	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(res);
	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
				   &vmw_user_stream_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->stream_id = res->id;
out_err:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_user_stream_lookup(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t *inout_id, struct vmw_resource **out)
{
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	int ret;

	res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto err_ref;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EPERM;
		goto err_ref;
	}

	*inout_id = stream->stream.stream_id;
	*out = res;
	return 0;
err_ref:
	vmw_resource_unreference(&res);
	return ret;
}