drm/vmwgfx: Report proper framebuffer_{max|min}_{width|height}
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_drm.h"
#include "ttm/ttm_object.h"
#include "ttm/ttm_placement.h"
#include "drmP.h"

#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2

/* XXX: This isn't a real hardware flag, but just a hack for the kernel to
 * know about primary surfaces. Find a better way to accomplish this.
 */
#define SVGA3D_SURFACE_HINT_SCANOUT (1 << 9)

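/*
 * User space passes this hint in the surface flags of the surface define
 * ioctl. vmw_surface_define_ioctl() below strips the bit again before the
 * define command is sent to the device and only uses it to mark the kernel
 * surface object as a scanout (primary) surface.
 */
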
struct vmw_user_context {
	struct ttm_base_object base;
	struct vmw_resource res;
};

struct vmw_user_surface {
	struct ttm_base_object base;
	struct vmw_surface srf;
};

struct vmw_user_dma_buffer {
	struct ttm_base_object base;
	struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
	uint32_t handle;
	uint64_t map_handle;
};

struct vmw_stream {
	struct vmw_resource res;
	uint32_t stream_id;
};

struct vmw_user_stream {
	struct ttm_base_object base;
	struct vmw_stream stream;
};

static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;

	idr_remove(res->idr, res->id);
	write_unlock(&dev_priv->resource_lock);

	if (likely(res->hw_destroy != NULL))
		res->hw_destroy(res);

	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	write_lock(&dev_priv->resource_lock);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;
	struct vmw_private *dev_priv = res->dev_priv;

	*p_res = NULL;
	write_lock(&dev_priv->resource_lock);
	kref_put(&res->kref, vmw_resource_release);
	write_unlock(&dev_priv->resource_lock);
}

static int vmw_resource_init(struct vmw_private *dev_priv,
			     struct vmw_resource *res,
			     struct idr *idr,
			     enum ttm_object_type obj_type,
			     void (*res_free) (struct vmw_resource *res))
{
	int ret;

	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->res_type = obj_type;
	res->idr = idr;
	res->avail = false;
	res->dev_priv = dev_priv;

	do {
		if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
			return -ENOMEM;

		write_lock(&dev_priv->resource_lock);
		ret = idr_get_new_above(idr, res, 1, &res->id);
		write_unlock(&dev_priv->resource_lock);

	} while (ret == -EAGAIN);

	return ret;
}

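/*
 * Note on the loop in vmw_resource_init() above: idr_pre_get() only
 * preallocates memory, and idr_get_new_above() may still return -EAGAIN
 * if that preallocation was consumed before the id could be handed out,
 * so the sequence is retried until it succeeds or genuinely runs out of
 * memory.
 */
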
/**
 * vmw_resource_activate
 *
 * @res: Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activate basically means that the function vmw_resource_lookup will
 * find it.
 */

static void vmw_resource_activate(struct vmw_resource *res,
				  void (*hw_destroy) (struct vmw_resource *))
{
	struct vmw_private *dev_priv = res->dev_priv;

	write_lock(&dev_priv->resource_lock);
	res->avail = true;
	res->hw_destroy = hw_destroy;
	write_unlock(&dev_priv->resource_lock);
}

struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
					 struct idr *idr, int id)
{
	struct vmw_resource *res;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(idr, id);
	if (res && res->avail)
		kref_get(&res->kref);
	else
		res = NULL;
	read_unlock(&dev_priv->resource_lock);

	if (unlikely(res == NULL))
		return NULL;

	return res;
}

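/*
 * A resource only becomes visible to vmw_resource_lookup() once
 * vmw_resource_activate() has marked it available; until then the id is
 * allocated but lookups return NULL. A caller that gets a non-NULL result
 * owns a reference and must drop it with vmw_resource_unreference().
 */
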
/**
 * Context management:
 */

static void vmw_hw_context_destroy(struct vmw_resource *res)
{

	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyContext body;
	} *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
}

static int vmw_context_init(struct vmw_private *dev_priv,
			    struct vmw_resource *res,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineContext body;
	} *cmd;

	ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
				VMW_RES_CONTEXT, res_free);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(res);
		else
			res_free(res);
		return ret;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;
}

struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
{
	struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
	int ret;

	if (unlikely(res == NULL))
		return NULL;

	ret = vmw_context_init(dev_priv, res, NULL);
	return (ret == 0) ? res : NULL;
}

/**
 * User-space context management:
 */

static void vmw_user_context_free(struct vmw_resource *res)
{
	struct vmw_user_context *ctx =
	    container_of(res, struct vmw_user_context, res);

	kfree(ctx);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_context *ctx =
	    container_of(base, struct vmw_user_context, base);
	struct vmw_resource *res = &ctx->res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_context *ctx;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_context_free) {
		ret = -EINVAL;
		goto out;
	}

	ctx = container_of(res, struct vmw_user_context, res);
	if (ctx->base.tfile != tfile && !ctx->base.shareable) {
		ret = -EPERM;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_context_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_context *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if (unlikely(ctx == NULL))
		return -ENOMEM;

	res = &ctx->res;
	ctx->base.shareable = false;
	ctx->base.tfile = NULL;

	ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(&ctx->res);
	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
				   &vmw_user_context_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->cid = res->id;
out_err:
	vmw_resource_unreference(&res);
	return ret;

}

int vmw_context_check(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      int id)
{
	struct vmw_resource *res;
	int ret = 0;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(&dev_priv->context_idr, id);
	if (res && res->avail) {
		struct vmw_user_context *ctx =
		    container_of(res, struct vmw_user_context, res);
		if (ctx->base.tfile != tfile && !ctx->base.shareable)
			ret = -EPERM;
	} else
		ret = -EINVAL;
	read_unlock(&dev_priv->resource_lock);

	return ret;
}


/**
 * Surface management.
 */

static void vmw_hw_surface_destroy(struct vmw_resource *res)
{

	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroySurface body;
	} *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "destruction.\n");
		return;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DESTROY);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.sid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
}

void vmw_surface_res_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);

	kfree(srf->sizes);
	kfree(srf->snooper.image);
	kfree(srf);
}

int vmw_surface_init(struct vmw_private *dev_priv,
		     struct vmw_surface *srf,
		     void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineSurface body;
	} *cmd;
	SVGA3dSize *cmd_size;
	struct vmw_resource *res = &srf->res;
	struct drm_vmw_size *src_size;
	size_t submit_size;
	uint32_t cmd_len;
	int i;

	BUG_ON(res_free == NULL);
	ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
				VMW_RES_SURFACE, res_free);

	if (unlikely(ret != 0)) {
		res_free(res);
		return ret;
	}

	submit_size = sizeof(*cmd) + srf->num_sizes * sizeof(SVGA3dSize);
	cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);

	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed for create surface.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DEFINE);
	cmd->header.size = cpu_to_le32(cmd_len);
	cmd->body.sid = cpu_to_le32(res->id);
	cmd->body.surfaceFlags = cpu_to_le32(srf->flags);
	cmd->body.format = cpu_to_le32(srf->format);
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
		cmd->body.face[i].numMipLevels =
		    cpu_to_le32(srf->mip_levels[i]);
	}

	cmd += 1;
	cmd_size = (SVGA3dSize *) cmd;
	src_size = srf->sizes;

	for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
		cmd_size->width = cpu_to_le32(src_size->width);
		cmd_size->height = cpu_to_le32(src_size->height);
		cmd_size->depth = cpu_to_le32(src_size->depth);
	}

	vmw_fifo_commit(dev_priv, submit_size);
	vmw_resource_activate(res, vmw_hw_surface_destroy);
	return 0;
}

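/*
 * The surface define command reserved above is laid out as a
 * SVGA3dCmdHeader plus SVGA3dCmdDefineSurface, immediately followed by one
 * SVGA3dSize per mip level of every face, which is why both submit_size
 * and cmd_len add num_sizes * sizeof(SVGA3dSize).
 */
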
static void vmw_user_surface_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
	struct vmw_user_surface *user_srf =
	    container_of(srf, struct vmw_user_surface, srf);

	kfree(srf->sizes);
	kfree(srf->snooper.image);
	kfree(user_srf);
}

int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
				   struct ttm_object_file *tfile,
				   uint32_t handle, struct vmw_surface **out)
{
	struct vmw_resource *res;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct ttm_base_object *base;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_resource;

	user_srf = container_of(base, struct vmw_user_surface, base);
	srf = &user_srf->srf;
	res = &srf->res;

	read_lock(&dev_priv->resource_lock);

	if (!res->avail || res->res_free != &vmw_user_surface_free) {
		read_unlock(&dev_priv->resource_lock);
		goto out_bad_resource;
	}

	kref_get(&res->kref);
	read_unlock(&dev_priv->resource_lock);

	*out = srf;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}

static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_surface *user_srf =
	    container_of(base, struct vmw_user_surface, base);
	struct vmw_resource *res = &user_srf->srf.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
}

int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf =
	    kmalloc(sizeof(*user_srf), GFP_KERNEL);
	struct vmw_surface *srf;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	union drm_vmw_surface_create_arg *arg =
	    (union drm_vmw_surface_create_arg *)data;
	struct drm_vmw_surface_create_req *req = &arg->req;
	struct drm_vmw_surface_arg *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct drm_vmw_size __user *user_sizes;
	int ret;
	int i;

	if (unlikely(user_srf == NULL))
		return -ENOMEM;

	srf = &user_srf->srf;
	res = &srf->res;

	srf->flags = req->flags;
	srf->format = req->format;
	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
	srf->num_sizes = 0;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		srf->num_sizes += srf->mip_levels[i];

	if (srf->num_sizes > DRM_VMW_MAX_SURFACE_FACES *
	    DRM_VMW_MAX_MIP_LEVELS) {
		ret = -EINVAL;
		goto out_err0;
	}

	srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
	if (unlikely(srf->sizes == NULL)) {
		ret = -ENOMEM;
		goto out_err0;
	}

	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    req->size_addr;

	ret = copy_from_user(srf->sizes, user_sizes,
			     srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0))
		goto out_err1;

	if (srf->flags & SVGA3D_SURFACE_HINT_SCANOUT) {
		/* we should not send this flag down to hardware since
		 * it's not an official one
		 */
		srf->flags &= ~SVGA3D_SURFACE_HINT_SCANOUT;
		srf->scanout = true;
	} else if (req->scanout)
		srf->scanout = true;
	else
		srf->scanout = false;

	if (srf->scanout &&
	    srf->num_sizes == 1 &&
	    srf->sizes[0].width == 64 &&
	    srf->sizes[0].height == 64 &&
	    srf->format == SVGA3D_A8R8G8B8) {

		srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
		/* clear the image */
		if (srf->snooper.image) {
			memset(srf->snooper.image, 0x00, 64 * 64 * 4);
		} else {
			DRM_ERROR("Failed to allocate cursor_image\n");
			ret = -ENOMEM;
			goto out_err1;
		}
	} else {
		srf->snooper.image = NULL;
	}
	srf->snooper.crtc = NULL;

	user_srf->base.shareable = false;
	user_srf->base.tfile = NULL;

	/**
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
	 */

	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(&srf->res);
	ret = ttm_base_object_init(tfile, &user_srf->base,
				   req->shareable, VMW_RES_SURFACE,
				   &vmw_user_surface_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		return ret;
	}

	rep->sid = user_srf->base.hash.key;
	if (rep->sid == SVGA3D_INVALID_ID)
		DRM_ERROR("Created bad Surface ID.\n");

	vmw_resource_unreference(&res);
	return 0;
out_err1:
	kfree(srf->sizes);
out_err0:
	kfree(user_srf);
	return ret;
}

int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	union drm_vmw_surface_reference_arg *arg =
	    (union drm_vmw_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_surface_create_req *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct drm_vmw_size __user *user_sizes;
	struct ttm_base_object *base;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, req->sid);
	if (unlikely(base == NULL)) {
		DRM_ERROR("Could not find surface to reference.\n");
		return -EINVAL;
	}

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_resource;

	user_srf = container_of(base, struct vmw_user_surface, base);
	srf = &user_srf->srf;

	ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not add a reference to a surface.\n");
		goto out_no_reference;
	}

	rep->flags = srf->flags;
	rep->format = srf->format;
	memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    rep->size_addr;

	if (user_sizes)
		ret = copy_to_user(user_sizes, srf->sizes,
				   srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0))
		DRM_ERROR("copy_to_user failed %p %u\n",
			  user_sizes, srf->num_sizes);
out_bad_resource:
out_no_reference:
	ttm_base_object_unref(&base);

	return ret;
}

int vmw_surface_check(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      uint32_t handle, int *id)
{
	struct ttm_base_object *base;
	struct vmw_user_surface *user_srf;

	int ret = -EPERM;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_surface;

	user_srf = container_of(base, struct vmw_user_surface, base);
	*id = user_srf->srf.res.id;
	ret = 0;

out_bad_surface:
	/**
	 * FIXME: May deadlock here when called from the
	 * command parsing code.
	 */

	ttm_base_object_unref(&base);
	return ret;
}

/**
 * Buffer management.
 */

static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
				  unsigned long num_pages)
{
	static size_t bo_user_size = ~0;

	size_t page_array_size =
	    (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;

	if (unlikely(bo_user_size == ~0)) {
		bo_user_size = glob->ttm_bo_extra_size +
		    ttm_round_pot(sizeof(struct vmw_dma_buffer));
	}

	return bo_user_size + page_array_size;
}

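/*
 * The accounting size is the TTM per-object overhead (cached in the static
 * bo_user_size on first use) plus the page pointer array rounded up to a
 * whole page. It is charged against the memory global in vmw_dmabuf_init()
 * before the buffer object itself is created.
 */
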
void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	struct ttm_bo_global *glob = bo->glob;
	struct vmw_private *dev_priv =
	    container_of(bo->bdev, struct vmw_private, bdev);

	if (vmw_bo->gmr_bound) {
		vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
		spin_lock(&glob->lru_lock);
		ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
		spin_unlock(&glob->lru_lock);
		vmw_bo->gmr_bound = false;
	}
}

void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	struct ttm_bo_global *glob = bo->glob;

	vmw_dmabuf_gmr_unbind(bo);
	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
	kfree(vmw_bo);
}

int vmw_dmabuf_init(struct vmw_private *dev_priv,
		    struct vmw_dma_buffer *vmw_bo,
		    size_t size, struct ttm_placement *placement,
		    bool interruptible,
		    void (*bo_free) (struct ttm_buffer_object *bo))
{
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	size_t acc_size;
	int ret;

	BUG_ON(!bo_free);

	acc_size =
	    vmw_dmabuf_acc_size(bdev->glob,
				(size + PAGE_SIZE - 1) >> PAGE_SHIFT);

	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (unlikely(ret != 0)) {
		/* we must free the bo here as
		 * ttm_buffer_object_init does so as well */
		bo_free(&vmw_bo->base);
		return ret;
	}

	memset(vmw_bo, 0, sizeof(*vmw_bo));

	INIT_LIST_HEAD(&vmw_bo->gmr_lru);
	INIT_LIST_HEAD(&vmw_bo->validate_list);
	vmw_bo->gmr_id = 0;
	vmw_bo->gmr_bound = false;

	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
			  ttm_bo_type_device, placement,
			  0, 0, interruptible,
			  NULL, acc_size, bo_free);
	return ret;
}

static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
	struct ttm_bo_global *glob = bo->glob;

	vmw_dmabuf_gmr_unbind(bo);
	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
	kfree(vmw_user_bo);
}

static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base = *p_base;
	struct ttm_buffer_object *bo;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
	bo = &vmw_user_bo->dma.base;
	ttm_bo_unref(&bo);
}

int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
	    (union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_buffer_object *tmp;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
	if (unlikely(vmw_user_bo == NULL))
		return -ENOMEM;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0)) {
		kfree(vmw_user_bo);
		return ret;
	}

	ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
			      &vmw_vram_sys_placement, true,
			      &vmw_user_dmabuf_destroy);
	if (unlikely(ret != 0)) {
		/* vmw_dmabuf_init() has already freed the buffer object. */
		ttm_read_unlock(&vmaster->lock);
		return ret;
	}

	tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
	ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
				   &vmw_user_bo->base,
				   false,
				   ttm_buffer_type,
				   &vmw_user_dmabuf_release, NULL);
	if (unlikely(ret != 0)) {
		ttm_bo_unref(&tmp);
	} else {
		rep->handle = vmw_user_bo->base.hash.key;
		rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
		rep->cur_gmr_id = vmw_user_bo->base.hash.key;
		rep->cur_gmr_offset = 0;
	}
	ttm_bo_unref(&tmp);

	ttm_read_unlock(&vmaster->lock);

	return 0;
}

int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
				  uint32_t cur_validate_node)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	if (likely(vmw_bo->on_validate_list))
		return vmw_bo->cur_validate_node;

	vmw_bo->cur_validate_node = cur_validate_node;
	vmw_bo->on_validate_list = true;

	return cur_validate_node;
}

void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	vmw_bo->on_validate_list = false;
}

uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo;

	if (bo->mem.mem_type == TTM_PL_VRAM)
		return SVGA_GMR_FRAMEBUFFER;

	vmw_bo = vmw_dma_buffer(bo);

	return (vmw_bo->gmr_bound) ? vmw_bo->gmr_id : SVGA_GMR_NULL;
}

void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	vmw_bo->gmr_bound = true;
	vmw_bo->gmr_id = id;
}

int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
			   uint32_t handle, struct vmw_dma_buffer **out)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -ESRCH;
	}

	if (unlikely(base->object_type != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -EINVAL;
	}

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
	ttm_base_object_unref(&base);
	*out = &vmw_user_bo->dma;

	return 0;
}

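/*
 * Note that vmw_user_dmabuf_lookup() returns with an extra reference held
 * on the underlying ttm_buffer_object; the caller is expected to drop it
 * with ttm_bo_unref() when done with the DMA buffer.
 */
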
/**
 * TODO: Implement a gmr id eviction mechanism. Currently we just fail
 * when we're out of ids, causing GMR space to be allocated
 * out of VRAM.
 */

int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id)
{
	struct ttm_bo_global *glob = dev_priv->bdev.glob;
	int id;
	int ret;

	do {
		if (unlikely(ida_pre_get(&dev_priv->gmr_ida, GFP_KERNEL) == 0))
			return -ENOMEM;

		spin_lock(&glob->lru_lock);
		ret = ida_get_new(&dev_priv->gmr_ida, &id);
		spin_unlock(&glob->lru_lock);
	} while (ret == -EAGAIN);

	if (unlikely(ret != 0))
		return ret;

	if (unlikely(id >= dev_priv->max_gmr_ids)) {
		spin_lock(&glob->lru_lock);
		ida_remove(&dev_priv->gmr_ida, id);
		spin_unlock(&glob->lru_lock);
		return -EBUSY;
	}

	*p_id = (uint32_t) id;
	return 0;
}

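/*
 * As with the resource ids above, the ida_pre_get()/ida_get_new() pair is
 * retried on -EAGAIN. An id at or beyond max_gmr_ids is immediately
 * released again and -EBUSY returned, in which case (per the TODO above)
 * the buffer simply ends up placed in VRAM instead of a GMR.
 */
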
/*
 * Stream management
 */

static void vmw_stream_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_stream *stream;
	int ret;

	DRM_INFO("%s: unref\n", __func__);
	stream = container_of(res, struct vmw_stream, res);

	ret = vmw_overlay_unref(dev_priv, stream->stream_id);
	WARN_ON(ret != 0);
}

static int vmw_stream_init(struct vmw_private *dev_priv,
			   struct vmw_stream *stream,
			   void (*res_free) (struct vmw_resource *res))
{
	struct vmw_resource *res = &stream->res;
	int ret;

	ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
				VMW_RES_STREAM, res_free);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(stream);
		else
			res_free(&stream->res);
		return ret;
	}

	ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
	if (ret) {
		vmw_resource_unreference(&res);
		return ret;
	}

	DRM_INFO("%s: claimed\n", __func__);

	vmw_resource_activate(&stream->res, vmw_stream_destroy);
	return 0;
}

/**
 * User-space stream management:
 */

static void vmw_user_stream_free(struct vmw_resource *res)
{
	struct vmw_user_stream *stream =
	    container_of(res, struct vmw_user_stream, stream.res);

	kfree(stream);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_stream *stream =
	    container_of(base, struct vmw_user_stream, base);
	struct vmw_resource *res = &stream->stream.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_stream *stream;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, arg->stream_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto out;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EINVAL;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_stream *stream = kmalloc(sizeof(*stream), GFP_KERNEL);
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if (unlikely(stream == NULL))
		return -ENOMEM;

	res = &stream->stream.res;
	stream->base.shareable = false;
	stream->base.tfile = NULL;

	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(res);
	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
				   &vmw_user_stream_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->stream_id = res->id;
out_err:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_user_stream_lookup(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t *inout_id, struct vmw_resource **out)
{
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	int ret;

	res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto err_ref;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EPERM;
		goto err_ref;
	}

	*inout_id = stream->stream.stream_id;
	*out = res;
	return 0;
err_ref:
	vmw_resource_unreference(&res);
	return ret;
}