drm/vmwgfx: remove use of fence_obj_args
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
1/**************************************************************************
2 *
3 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
4 * All Rights Reserved.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
16 * of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
28#include "vmwgfx_drv.h"
29#include <drm/vmwgfx_drm.h>
30#include <drm/ttm/ttm_object.h>
31#include <drm/ttm/ttm_placement.h>
32#include <drm/drmP.h>
33
34struct vmw_user_context {
35 struct ttm_base_object base;
36 struct vmw_resource res;
37};
38
39struct vmw_user_surface {
40 struct ttm_base_object base;
41 struct vmw_surface srf;
42 uint32_t size;
43};
44
45struct vmw_user_dma_buffer {
46 struct ttm_base_object base;
47 struct vmw_dma_buffer dma;
48};
49
50struct vmw_bo_user_rep {
51 uint32_t handle;
52 uint64_t map_handle;
53};
54
55struct vmw_stream {
56 struct vmw_resource res;
57 uint32_t stream_id;
58};
59
60struct vmw_user_stream {
61 struct ttm_base_object base;
62 struct vmw_stream stream;
63};
64
65struct vmw_surface_offset {
66 uint32_t face;
67 uint32_t mip;
68 uint32_t bo_offset;
69};
70
71
72static uint64_t vmw_user_context_size;
73static uint64_t vmw_user_surface_size;
74static uint64_t vmw_user_stream_size;
75
76static inline struct vmw_dma_buffer *
77vmw_dma_buffer(struct ttm_buffer_object *bo)
78{
79 return container_of(bo, struct vmw_dma_buffer, base);
80}
81
82static inline struct vmw_user_dma_buffer *
83vmw_user_dma_buffer(struct ttm_buffer_object *bo)
84{
85 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
86 return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
87}
88
89struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
90{
91 kref_get(&res->kref);
92 return res;
93}
94
95
96/**
97 * vmw_resource_release_id - release a resource id to the id manager.
98 *
99 * @res: Pointer to the resource.
100 *
101 * Release the resource id to the resource id manager and set it to -1
102 */
103static void vmw_resource_release_id(struct vmw_resource *res)
104{
105 struct vmw_private *dev_priv = res->dev_priv;
106
107 write_lock(&dev_priv->resource_lock);
108 if (res->id != -1)
109 idr_remove(res->idr, res->id);
110 res->id = -1;
111 write_unlock(&dev_priv->resource_lock);
112}
113
114static void vmw_resource_release(struct kref *kref)
115{
116 struct vmw_resource *res =
117 container_of(kref, struct vmw_resource, kref);
118 struct vmw_private *dev_priv = res->dev_priv;
119 int id = res->id;
120 struct idr *idr = res->idr;
121
122 res->avail = false;
123 if (res->remove_from_lists != NULL)
124 res->remove_from_lists(res);
125 write_unlock(&dev_priv->resource_lock);
126
127 if (likely(res->hw_destroy != NULL))
128 res->hw_destroy(res);
129
130 if (res->res_free != NULL)
131 res->res_free(res);
132 else
133 kfree(res);
134
135 write_lock(&dev_priv->resource_lock);
136
137 if (id != -1)
138 idr_remove(idr, id);
139}
140
141void vmw_resource_unreference(struct vmw_resource **p_res)
142{
143 struct vmw_resource *res = *p_res;
144 struct vmw_private *dev_priv = res->dev_priv;
145
146 *p_res = NULL;
147 write_lock(&dev_priv->resource_lock);
148 kref_put(&res->kref, vmw_resource_release);
149 write_unlock(&dev_priv->resource_lock);
150}
151
152
153/**
154 * vmw_resource_alloc_id - allocate a resource id from the id manager.
155 *
156 * @dev_priv: Pointer to the device private structure.
157 * @res: Pointer to the resource.
158 *
159 * Allocate the lowest free resource from the resource manager, and set
160 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
161 */
162static int vmw_resource_alloc_id(struct vmw_private *dev_priv,
163 struct vmw_resource *res)
164{
165 int ret;
166
167 BUG_ON(res->id != -1);
168
169 do {
170 if (unlikely(idr_pre_get(res->idr, GFP_KERNEL) == 0))
171 return -ENOMEM;
172
173 write_lock(&dev_priv->resource_lock);
174 ret = idr_get_new_above(res->idr, res, 1, &res->id);
175 write_unlock(&dev_priv->resource_lock);
176
177 } while (ret == -EAGAIN);
178
179 return ret;
180}
181
182
183static int vmw_resource_init(struct vmw_private *dev_priv,
184 struct vmw_resource *res,
185 struct idr *idr,
186 enum ttm_object_type obj_type,
187 bool delay_id,
188 void (*res_free) (struct vmw_resource *res),
189 void (*remove_from_lists)
190 (struct vmw_resource *res))
191{
192 kref_init(&res->kref);
193 res->hw_destroy = NULL;
194 res->res_free = res_free;
195 res->remove_from_lists = remove_from_lists;
196 res->res_type = obj_type;
197 res->idr = idr;
198 res->avail = false;
199 res->dev_priv = dev_priv;
200 INIT_LIST_HEAD(&res->query_head);
201 INIT_LIST_HEAD(&res->validate_head);
202 res->id = -1;
203 if (delay_id)
204 return 0;
205 else
206 return vmw_resource_alloc_id(dev_priv, res);
207}
208
209/**
210 * vmw_resource_activate
211 *
212 * @res: Pointer to the newly created resource
213 * @hw_destroy: Destroy function. NULL if none.
214 *
215 * Activate a resource after the hardware has been made aware of it.
216 * Set the destroy function to @hw_destroy. Typically this frees the
217 * resource and destroys the hardware resources associated with it.
218 * Activate basically means that the function vmw_resource_lookup will
219 * find it.
220 */
221
222static void vmw_resource_activate(struct vmw_resource *res,
223 void (*hw_destroy) (struct vmw_resource *))
224{
225 struct vmw_private *dev_priv = res->dev_priv;
226
227 write_lock(&dev_priv->resource_lock);
228 res->avail = true;
229 res->hw_destroy = hw_destroy;
230 write_unlock(&dev_priv->resource_lock);
231}
232
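/*
 * Illustrative lifecycle sketch (added comment, not in the original file):
 * a typical resource goes through the helpers above roughly as
 *
 *	vmw_resource_init(dev_priv, res, idr, type, false, res_free, NULL);
 *	... reserve FIFO space and emit the hardware define command ...
 *	vmw_resource_activate(res, my_hw_destroy);
 *		(the resource can now be found by vmw_resource_lookup())
 *	...
 *	vmw_resource_unreference(&res);
 *		(the last unref calls hw_destroy/res_free and releases the id)
 *
 * where my_hw_destroy is a hypothetical driver-specific destroy callback.
 */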
233struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
234 struct idr *idr, int id)
235{
236 struct vmw_resource *res;
237
238 read_lock(&dev_priv->resource_lock);
239 res = idr_find(idr, id);
240 if (res && res->avail)
241 kref_get(&res->kref);
242 else
243 res = NULL;
244 read_unlock(&dev_priv->resource_lock);
245
246 if (unlikely(res == NULL))
247 return NULL;
248
249 return res;
250}
251
252/**
253 * Context management:
254 */
255
256static void vmw_hw_context_destroy(struct vmw_resource *res)
257{
258
259 struct vmw_private *dev_priv = res->dev_priv;
260 struct {
261 SVGA3dCmdHeader header;
262 SVGA3dCmdDestroyContext body;
263 } *cmd;
264
265
266 vmw_execbuf_release_pinned_bo(dev_priv, true, res->id);
267
268 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
269 if (unlikely(cmd == NULL)) {
270 DRM_ERROR("Failed reserving FIFO space for context "
271 "destruction.\n");
272 return;
273 }
274
275 cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
276 cmd->header.size = cpu_to_le32(sizeof(cmd->body));
277 cmd->body.cid = cpu_to_le32(res->id);
278
279 vmw_fifo_commit(dev_priv, sizeof(*cmd));
280 vmw_3d_resource_dec(dev_priv, false);
281}
282
283static int vmw_context_init(struct vmw_private *dev_priv,
284 struct vmw_resource *res,
285 void (*res_free) (struct vmw_resource *res))
286{
287 int ret;
288
289 struct {
290 SVGA3dCmdHeader header;
291 SVGA3dCmdDefineContext body;
292 } *cmd;
293
294 ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
295 VMW_RES_CONTEXT, false, res_free, NULL);
296
297 if (unlikely(ret != 0)) {
298 DRM_ERROR("Failed to allocate a resource id.\n");
299 goto out_early;
300 }
301
302 if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
303 DRM_ERROR("Out of hw context ids.\n");
304 vmw_resource_unreference(&res);
305 return -ENOMEM;
306 }
307
308 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
309 if (unlikely(cmd == NULL)) {
310 DRM_ERROR("Fifo reserve failed.\n");
311 vmw_resource_unreference(&res);
312 return -ENOMEM;
313 }
314
315 cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
316 cmd->header.size = cpu_to_le32(sizeof(cmd->body));
317 cmd->body.cid = cpu_to_le32(res->id);
318
319 vmw_fifo_commit(dev_priv, sizeof(*cmd));
320 (void) vmw_3d_resource_inc(dev_priv, false);
321 vmw_resource_activate(res, vmw_hw_context_destroy);
322 return 0;
323
324out_early:
325 if (res_free == NULL)
326 kfree(res);
327 else
328 res_free(res);
329 return ret;
330}
331
332struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
333{
334 struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
335 int ret;
336
337 if (unlikely(res == NULL))
338 return NULL;
339
340 ret = vmw_context_init(dev_priv, res, NULL);
341 return (ret == 0) ? res : NULL;
342}
343
344/**
345 * User-space context management:
346 */
347
348static void vmw_user_context_free(struct vmw_resource *res)
349{
350 struct vmw_user_context *ctx =
351 container_of(res, struct vmw_user_context, res);
352 struct vmw_private *dev_priv = res->dev_priv;
353
354 kfree(ctx);
355 ttm_mem_global_free(vmw_mem_glob(dev_priv),
356 vmw_user_context_size);
357}
358
359/**
360 * This function is called when user space has no more references on the
361 * base object. It releases the base-object's reference on the resource object.
362 */
363
364static void vmw_user_context_base_release(struct ttm_base_object **p_base)
365{
366 struct ttm_base_object *base = *p_base;
367 struct vmw_user_context *ctx =
368 container_of(base, struct vmw_user_context, base);
369 struct vmw_resource *res = &ctx->res;
370
371 *p_base = NULL;
372 vmw_resource_unreference(&res);
373}
374
375int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
376 struct drm_file *file_priv)
377{
378 struct vmw_private *dev_priv = vmw_priv(dev);
379 struct vmw_resource *res;
380 struct vmw_user_context *ctx;
381 struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
382 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
383 int ret = 0;
384
385 res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid);
386 if (unlikely(res == NULL))
387 return -EINVAL;
388
389 if (res->res_free != &vmw_user_context_free) {
390 ret = -EINVAL;
391 goto out;
392 }
393
394 ctx = container_of(res, struct vmw_user_context, res);
395 if (ctx->base.tfile != tfile && !ctx->base.shareable) {
396 ret = -EPERM;
397 goto out;
398 }
399
400 ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE);
401out:
402 vmw_resource_unreference(&res);
403 return ret;
404}
405
406int vmw_context_define_ioctl(struct drm_device *dev, void *data,
407 struct drm_file *file_priv)
408{
409 struct vmw_private *dev_priv = vmw_priv(dev);
410 struct vmw_user_context *ctx;
411 struct vmw_resource *res;
412 struct vmw_resource *tmp;
413 struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
414 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
415 struct vmw_master *vmaster = vmw_master(file_priv->master);
416 int ret;
417
418
419 /*
420 * Approximate idr memory usage with 128 bytes. It will be limited
421 * by the maximum number of contexts anyway.
422 */
423
424 if (unlikely(vmw_user_context_size == 0))
425 vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;
426
427 ret = ttm_read_lock(&vmaster->lock, true);
428 if (unlikely(ret != 0))
429 return ret;
430
431 ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
432 vmw_user_context_size,
433 false, true);
434 if (unlikely(ret != 0)) {
435 if (ret != -ERESTARTSYS)
436 DRM_ERROR("Out of graphics memory for context"
437 " creation.\n");
438 goto out_unlock;
439 }
440
441 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
442 if (unlikely(ctx == NULL)) {
443 ttm_mem_global_free(vmw_mem_glob(dev_priv),
444 vmw_user_context_size);
445 ret = -ENOMEM;
446 goto out_unlock;
447 }
448
449 res = &ctx->res;
450 ctx->base.shareable = false;
451 ctx->base.tfile = NULL;
452
453 /*
454 * From here on, the destructor takes over resource freeing.
455 */
456
457 ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
458 if (unlikely(ret != 0))
459 goto out_unlock;
460
461 tmp = vmw_resource_reference(&ctx->res);
462 ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
463 &vmw_user_context_base_release, NULL);
464
465 if (unlikely(ret != 0)) {
466 vmw_resource_unreference(&tmp);
467 goto out_err;
468 }
469
470 arg->cid = res->id;
471out_err:
472 vmw_resource_unreference(&res);
473out_unlock:
474 ttm_read_unlock(&vmaster->lock);
475 return ret;
476
477}
478
479int vmw_context_check(struct vmw_private *dev_priv,
480 struct ttm_object_file *tfile,
481 int id,
482 struct vmw_resource **p_res)
483{
484 struct vmw_resource *res;
485 int ret = 0;
486
487 read_lock(&dev_priv->resource_lock);
488 res = idr_find(&dev_priv->context_idr, id);
489 if (res && res->avail) {
490 struct vmw_user_context *ctx =
491 container_of(res, struct vmw_user_context, res);
492 if (ctx->base.tfile != tfile && !ctx->base.shareable)
493 ret = -EPERM;
494 if (p_res)
495 *p_res = vmw_resource_reference(res);
496 } else
497 ret = -EINVAL;
498 read_unlock(&dev_priv->resource_lock);
499
500 return ret;
501}
502
503struct vmw_bpp {
504 uint8_t bpp;
505 uint8_t s_bpp;
506};
507
508/*
509 * Size table for the supported SVGA3D surface formats. It consists of
510 * two values: the bpp value and the s_bpp value, which is short for
511 * "stride bits per pixel". The values are given in such a way that the
512 * minimum stride for the image is calculated using
513 *
514 * min_stride = w*s_bpp
515 *
516 * and the total memory requirement for the image is
517 *
518 * h*min_stride*bpp/s_bpp
519 *
520 */
521static const struct vmw_bpp vmw_sf_bpp[] = {
522 [SVGA3D_FORMAT_INVALID] = {0, 0},
523 [SVGA3D_X8R8G8B8] = {32, 32},
524 [SVGA3D_A8R8G8B8] = {32, 32},
525 [SVGA3D_R5G6B5] = {16, 16},
526 [SVGA3D_X1R5G5B5] = {16, 16},
527 [SVGA3D_A1R5G5B5] = {16, 16},
528 [SVGA3D_A4R4G4B4] = {16, 16},
529 [SVGA3D_Z_D32] = {32, 32},
530 [SVGA3D_Z_D16] = {16, 16},
531 [SVGA3D_Z_D24S8] = {32, 32},
532 [SVGA3D_Z_D15S1] = {16, 16},
533 [SVGA3D_LUMINANCE8] = {8, 8},
534 [SVGA3D_LUMINANCE4_ALPHA4] = {8, 8},
535 [SVGA3D_LUMINANCE16] = {16, 16},
536 [SVGA3D_LUMINANCE8_ALPHA8] = {16, 16},
537 [SVGA3D_DXT1] = {4, 16},
538 [SVGA3D_DXT2] = {8, 32},
539 [SVGA3D_DXT3] = {8, 32},
540 [SVGA3D_DXT4] = {8, 32},
541 [SVGA3D_DXT5] = {8, 32},
542 [SVGA3D_BUMPU8V8] = {16, 16},
543 [SVGA3D_BUMPL6V5U5] = {16, 16},
544 [SVGA3D_BUMPX8L8V8U8] = {32, 32},
545 [SVGA3D_ARGB_S10E5] = {16, 16},
546 [SVGA3D_ARGB_S23E8] = {32, 32},
547 [SVGA3D_A2R10G10B10] = {32, 32},
548 [SVGA3D_V8U8] = {16, 16},
549 [SVGA3D_Q8W8V8U8] = {32, 32},
550 [SVGA3D_CxV8U8] = {16, 16},
551 [SVGA3D_X8L8V8U8] = {32, 32},
552 [SVGA3D_A2W10V10U10] = {32, 32},
553 [SVGA3D_ALPHA8] = {8, 8},
554 [SVGA3D_R_S10E5] = {16, 16},
555 [SVGA3D_R_S23E8] = {32, 32},
556 [SVGA3D_RG_S10E5] = {16, 16},
557 [SVGA3D_RG_S23E8] = {32, 32},
558 [SVGA3D_BUFFER] = {8, 8},
559 [SVGA3D_Z_D24X8] = {32, 32},
560 [SVGA3D_V16U16] = {32, 32},
561 [SVGA3D_G16R16] = {32, 32},
562 [SVGA3D_A16B16G16R16] = {64, 64},
563 [SVGA3D_UYVY] = {12, 12},
564 [SVGA3D_YUY2] = {12, 12},
565 [SVGA3D_NV12] = {12, 8},
566 [SVGA3D_AYUV] = {32, 32},
567 [SVGA3D_BC4_UNORM] = {4, 16},
568 [SVGA3D_BC5_UNORM] = {8, 32},
569 [SVGA3D_Z_DF16] = {16, 16},
570 [SVGA3D_Z_DF24] = {24, 24},
571 [SVGA3D_Z_D24S8_INT] = {32, 32}
572};
573
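/*
 * Worked example (added for illustration, not in the original file): for
 * SVGA3D_NV12 the table gives bpp = 12 and s_bpp = 8, so a 64x64x1 mip
 * level yields, using the formulas applied below,
 *
 *	pitch  = (64 * 8 + 7) >> 3       =   64 bytes
 *	memory = pitch * 64 * 1 * 12 / 8 = 6144 bytes
 *
 * i.e. 4096 bytes of luma followed by 2048 bytes of interleaved chroma.
 */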
574
575/**
576 * Surface management.
577 */
578
579struct vmw_surface_dma {
580 SVGA3dCmdHeader header;
581 SVGA3dCmdSurfaceDMA body;
582 SVGA3dCopyBox cb;
583 SVGA3dCmdSurfaceDMASuffix suffix;
584};
585
586struct vmw_surface_define {
587 SVGA3dCmdHeader header;
588 SVGA3dCmdDefineSurface body;
589};
590
591struct vmw_surface_destroy {
592 SVGA3dCmdHeader header;
593 SVGA3dCmdDestroySurface body;
594};
595
596
597/**
598 * vmw_surface_dma_size - Compute fifo size for a dma command.
599 *
600 * @srf: Pointer to a struct vmw_surface
601 *
602 * Computes the required size for a surface dma command for backup or
603 * restoration of the surface represented by @srf.
604 */
605static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
606{
607 return srf->num_sizes * sizeof(struct vmw_surface_dma);
608}
609
610
611/**
612 * vmw_surface_define_size - Compute fifo size for a surface define command.
613 *
614 * @srf: Pointer to a struct vmw_surface
615 *
616 * Computes the required size for a surface define command for the definition
617 * of the surface represented by @srf.
618 */
619static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
620{
621 return sizeof(struct vmw_surface_define) + srf->num_sizes *
622 sizeof(SVGA3dSize);
623}
624
625
626/**
627 * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
628 *
629 * Computes the required size for a surface destroy command for the destruction
630 * of a hw surface.
631 */
632static inline uint32_t vmw_surface_destroy_size(void)
633{
634 return sizeof(struct vmw_surface_destroy);
635}
636
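/*
 * Note added for clarity (not in the original file): the three size helpers
 * above determine how much FIFO space the surface paths further down
 * reserve. vmw_surface_do_validate() reserves vmw_surface_define_size(srf),
 * plus vmw_surface_dma_size(srf) when a backup buffer exists, while
 * vmw_surface_evict() reserves vmw_surface_dma_size(srf) +
 * vmw_surface_destroy_size().
 */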
637/**
638 * vmw_surface_destroy_encode - Encode a surface_destroy command.
639 *
640 * @id: The surface id
641 * @cmd_space: Pointer to memory area in which the commands should be encoded.
642 */
643static void vmw_surface_destroy_encode(uint32_t id,
644 void *cmd_space)
645{
646 struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
647 cmd_space;
648
649 cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
650 cmd->header.size = sizeof(cmd->body);
651 cmd->body.sid = id;
652}
653
654/**
655 * vmw_surface_define_encode - Encode a surface_define command.
656 *
657 * @srf: Pointer to a struct vmw_surface object.
658 * @cmd_space: Pointer to memory area in which the commands should be encoded.
659 */
660static void vmw_surface_define_encode(const struct vmw_surface *srf,
661 void *cmd_space)
662{
663 struct vmw_surface_define *cmd = (struct vmw_surface_define *)
664 cmd_space;
665 struct drm_vmw_size *src_size;
666 SVGA3dSize *cmd_size;
667 uint32_t cmd_len;
668 int i;
669
670 cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);
671
672 cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
673 cmd->header.size = cmd_len;
674 cmd->body.sid = srf->res.id;
675 cmd->body.surfaceFlags = srf->flags;
676 cmd->body.format = cpu_to_le32(srf->format);
677 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
678 cmd->body.face[i].numMipLevels = srf->mip_levels[i];
679
680 cmd += 1;
681 cmd_size = (SVGA3dSize *) cmd;
682 src_size = srf->sizes;
683
684 for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
685 cmd_size->width = src_size->width;
686 cmd_size->height = src_size->height;
687 cmd_size->depth = src_size->depth;
688 }
689}
690
691
692/**
693 * vmw_surface_dma_encode - Encode a surface_dma command.
694 *
695 * @srf: Pointer to a struct vmw_surface object.
696 * @cmd_space: Pointer to memory area in which the commands should be encoded.
697 * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
698 * should be placed or read from.
699 * @to_surface: Boolean whether to DMA to the surface or from the surface.
700 */
701static void vmw_surface_dma_encode(struct vmw_surface *srf,
702 void *cmd_space,
703 const SVGAGuestPtr *ptr,
704 bool to_surface)
705{
706 uint32_t i;
707 uint32_t bpp = vmw_sf_bpp[srf->format].bpp;
708 uint32_t stride_bpp = vmw_sf_bpp[srf->format].s_bpp;
709 struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
710
711 for (i = 0; i < srf->num_sizes; ++i) {
712 SVGA3dCmdHeader *header = &cmd->header;
713 SVGA3dCmdSurfaceDMA *body = &cmd->body;
714 SVGA3dCopyBox *cb = &cmd->cb;
715 SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
716 const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
717 const struct drm_vmw_size *cur_size = &srf->sizes[i];
718
719 header->id = SVGA_3D_CMD_SURFACE_DMA;
720 header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);
721
722 body->guest.ptr = *ptr;
723 body->guest.ptr.offset += cur_offset->bo_offset;
724 body->guest.pitch = (cur_size->width * stride_bpp + 7) >> 3;
725 body->host.sid = srf->res.id;
726 body->host.face = cur_offset->face;
727 body->host.mipmap = cur_offset->mip;
728 body->transfer = ((to_surface) ? SVGA3D_WRITE_HOST_VRAM :
729 SVGA3D_READ_HOST_VRAM);
730 cb->x = 0;
731 cb->y = 0;
732 cb->z = 0;
733 cb->srcx = 0;
734 cb->srcy = 0;
735 cb->srcz = 0;
736 cb->w = cur_size->width;
737 cb->h = cur_size->height;
738 cb->d = cur_size->depth;
739
740 suffix->suffixSize = sizeof(*suffix);
741 suffix->maximumOffset = body->guest.pitch*cur_size->height*
742 cur_size->depth*bpp / stride_bpp;
743 suffix->flags.discard = 0;
744 suffix->flags.unsynchronized = 0;
745 suffix->flags.reserved = 0;
746 ++cmd;
747 }
748}
749
750
751static void vmw_hw_surface_destroy(struct vmw_resource *res)
752{
753
754 struct vmw_private *dev_priv = res->dev_priv;
755 struct vmw_surface *srf;
756 void *cmd;
757
758 if (res->id != -1) {
759
760 cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
761 if (unlikely(cmd == NULL)) {
762 DRM_ERROR("Failed reserving FIFO space for surface "
763 "destruction.\n");
764 return;
765 }
766
767 vmw_surface_destroy_encode(res->id, cmd);
768 vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());
769
770 /*
771 * used_memory_size_atomic, or separate lock
772 * to avoid taking dev_priv::cmdbuf_mutex in
773 * the destroy path.
774 */
775
776 mutex_lock(&dev_priv->cmdbuf_mutex);
777 srf = container_of(res, struct vmw_surface, res);
778 dev_priv->used_memory_size -= srf->backup_size;
779 mutex_unlock(&dev_priv->cmdbuf_mutex);
780
781 }
782 vmw_3d_resource_dec(dev_priv, false);
783}
784
785void vmw_surface_res_free(struct vmw_resource *res)
786{
787 struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
788
789 if (srf->backup)
790 ttm_bo_unref(&srf->backup);
791 kfree(srf->offsets);
792 kfree(srf->sizes);
793 kfree(srf->snooper.image);
794 kfree(srf);
795}
796
797
798/**
799 * vmw_surface_do_validate - make a surface available to the device.
800 *
801 * @dev_priv: Pointer to a device private struct.
802 * @srf: Pointer to a struct vmw_surface.
803 *
804 * If the surface doesn't have a hw id, allocate one, and optionally
805 * DMA the backed up surface contents to the device.
806 *
807 * Returns -EBUSY if there wasn't sufficient device resources to
808 * complete the validation. Retry after freeing up resources.
809 *
810 * May return other errors if the kernel is out of guest resources.
811 */
812int vmw_surface_do_validate(struct vmw_private *dev_priv,
813 struct vmw_surface *srf)
814{
815 struct vmw_resource *res = &srf->res;
816 struct list_head val_list;
817 struct ttm_validate_buffer val_buf;
818 uint32_t submit_size;
819 uint8_t *cmd;
820 int ret;
821
822 if (likely(res->id != -1))
823 return 0;
824
825 if (unlikely(dev_priv->used_memory_size + srf->backup_size >=
826 dev_priv->memory_size))
827 return -EBUSY;
828
829 /*
830 * Reserve- and validate the backup DMA bo.
831 */
832
833 if (srf->backup) {
834 INIT_LIST_HEAD(&val_list);
835 val_buf.bo = ttm_bo_reference(srf->backup);
836 list_add_tail(&val_buf.head, &val_list);
837 ret = ttm_eu_reserve_buffers(&val_list);
838 if (unlikely(ret != 0))
839 goto out_no_reserve;
840
841 ret = ttm_bo_validate(srf->backup, &vmw_srf_placement,
842 true, false, false);
843 if (unlikely(ret != 0))
844 goto out_no_validate;
845 }
846
847 /*
848 * Alloc id for the resource.
849 */
850
851 ret = vmw_resource_alloc_id(dev_priv, res);
852 if (unlikely(ret != 0)) {
853 DRM_ERROR("Failed to allocate a surface id.\n");
854 goto out_no_id;
855 }
856 if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
857 ret = -EBUSY;
858 goto out_no_fifo;
859 }
860
861
862 /*
863 * Encode surface define- and dma commands.
864 */
865
866 submit_size = vmw_surface_define_size(srf);
867 if (srf->backup)
868 submit_size += vmw_surface_dma_size(srf);
869
870 cmd = vmw_fifo_reserve(dev_priv, submit_size);
871 if (unlikely(cmd == NULL)) {
872 DRM_ERROR("Failed reserving FIFO space for surface "
873 "validation.\n");
874 ret = -ENOMEM;
875 goto out_no_fifo;
876 }
877
878 vmw_surface_define_encode(srf, cmd);
879 if (srf->backup) {
880 SVGAGuestPtr ptr;
881
882 cmd += vmw_surface_define_size(srf);
883 vmw_bo_get_guest_ptr(srf->backup, &ptr);
884 vmw_surface_dma_encode(srf, cmd, &ptr, true);
885 }
886
887 vmw_fifo_commit(dev_priv, submit_size);
888
889 /*
890 * Create a fence object and fence the backup buffer.
891 */
892
893 if (srf->backup) {
894 struct vmw_fence_obj *fence;
895
896 (void) vmw_execbuf_fence_commands(NULL, dev_priv,
897 &fence, NULL);
898 ttm_eu_fence_buffer_objects(&val_list, fence);
899 if (likely(fence != NULL))
900 vmw_fence_obj_unreference(&fence);
901 ttm_bo_unref(&val_buf.bo);
902 ttm_bo_unref(&srf->backup);
903 }
904
905 /*
906 * Surface memory usage accounting.
907 */
908
909 dev_priv->used_memory_size += srf->backup_size;
910
911 return 0;
912
913out_no_fifo:
914 vmw_resource_release_id(res);
915out_no_id:
916out_no_validate:
917 if (srf->backup)
918 ttm_eu_backoff_reservation(&val_list);
919out_no_reserve:
920 if (srf->backup)
921 ttm_bo_unref(&val_buf.bo);
922 return ret;
923}
924
925/**
926 * vmw_surface_evict - Evict a hw surface.
927 *
928 * @dev_priv: Pointer to a device private struct.
929 * @srf: Pointer to a struct vmw_surface
930 *
931 * DMA the contents of a hw surface to a backup guest buffer object,
932 * and destroy the hw surface, releasing its id.
933 */
934int vmw_surface_evict(struct vmw_private *dev_priv,
935 struct vmw_surface *srf)
936{
937 struct vmw_resource *res = &srf->res;
938 struct list_head val_list;
939 struct ttm_validate_buffer val_buf;
940 uint32_t submit_size;
941 uint8_t *cmd;
942 int ret;
943 struct vmw_fence_obj *fence;
944 SVGAGuestPtr ptr;
945
946 BUG_ON(res->id == -1);
947
948 /*
949 * Create a surface backup buffer object.
950 */
951
952 if (!srf->backup) {
953 ret = ttm_bo_create(&dev_priv->bdev, srf->backup_size,
954 ttm_bo_type_device,
955 &vmw_srf_placement, 0, true,
956 NULL, &srf->backup);
957 if (unlikely(ret != 0))
958 return ret;
959 }
960
961 /*
962 * Reserve- and validate the backup DMA bo.
963 */
964
965 INIT_LIST_HEAD(&val_list);
966 val_buf.bo = ttm_bo_reference(srf->backup);
967 list_add_tail(&val_buf.head, &val_list);
968 ret = ttm_eu_reserve_buffers(&val_list);
969 if (unlikely(ret != 0))
970 goto out_no_reserve;
971
972 ret = ttm_bo_validate(srf->backup, &vmw_srf_placement,
973 true, false, false);
974 if (unlikely(ret != 0))
975 goto out_no_validate;
976
977
978 /*
979 * Encode the dma- and surface destroy commands.
980 */
981
982 submit_size = vmw_surface_dma_size(srf) + vmw_surface_destroy_size();
983 cmd = vmw_fifo_reserve(dev_priv, submit_size);
984 if (unlikely(cmd == NULL)) {
985 DRM_ERROR("Failed reserving FIFO space for surface "
986 "eviction.\n");
987 ret = -ENOMEM;
988 goto out_no_fifo;
989 }
990
991 vmw_bo_get_guest_ptr(srf->backup, &ptr);
992 vmw_surface_dma_encode(srf, cmd, &ptr, false);
993 cmd += vmw_surface_dma_size(srf);
994 vmw_surface_destroy_encode(res->id, cmd);
995 vmw_fifo_commit(dev_priv, submit_size);
996
997 /*
998 * Surface memory usage accounting.
999 */
1000
1001 dev_priv->used_memory_size -= srf->backup_size;
1002
1003 /*
1004 * Create a fence object and fence the DMA buffer.
1005 */
1006
1007 (void) vmw_execbuf_fence_commands(NULL, dev_priv,
1008 &fence, NULL);
1009 ttm_eu_fence_buffer_objects(&val_list, fence);
1010 if (likely(fence != NULL))
1011 vmw_fence_obj_unreference(&fence);
1012 ttm_bo_unref(&val_buf.bo);
1013
1014 /*
1015 * Release the surface ID.
1016 */
1017
1018 vmw_resource_release_id(res);
1019
1020 return 0;
1021
1022out_no_fifo:
1023out_no_validate:
1024 if (srf->backup)
1025 ttm_eu_backoff_reservation(&val_list);
1026out_no_reserve:
1027 ttm_bo_unref(&val_buf.bo);
1028 ttm_bo_unref(&srf->backup);
1029 return ret;
1030}
1031
1032
1033/**
1034 * vmw_surface_validate - make a surface available to the device, evicting
1035 * other surfaces if needed.
1036 *
1037 * @dev_priv: Pointer to a device private struct.
1038 * @srf: Pointer to a struct vmw_surface.
1039 *
1040 * Try to validate a surface and if it fails due to limited device resources,
1041 * repeatedly try to evict other surfaces until the request can be
1042 * accommodated.
1043 *
1044 * May return errors if out of resources.
1045 */
1046int vmw_surface_validate(struct vmw_private *dev_priv,
1047 struct vmw_surface *srf)
1048{
1049 int ret;
1050 struct vmw_surface *evict_srf;
1051
1052 do {
1053 write_lock(&dev_priv->resource_lock);
1054 list_del_init(&srf->lru_head);
1055 write_unlock(&dev_priv->resource_lock);
1056
1057 ret = vmw_surface_do_validate(dev_priv, srf);
1058 if (likely(ret != -EBUSY))
1059 break;
1060
1061 write_lock(&dev_priv->resource_lock);
1062 if (list_empty(&dev_priv->surface_lru)) {
1063 DRM_ERROR("Out of device memory for surfaces.\n");
1064 ret = -EBUSY;
1065 write_unlock(&dev_priv->resource_lock);
1066 break;
1067 }
1068
1069 evict_srf = vmw_surface_reference
1070 (list_first_entry(&dev_priv->surface_lru,
1071 struct vmw_surface,
1072 lru_head));
1073 list_del_init(&evict_srf->lru_head);
1074
1075 write_unlock(&dev_priv->resource_lock);
1076 (void) vmw_surface_evict(dev_priv, evict_srf);
1077
1078 vmw_surface_unreference(&evict_srf);
1079
1080 } while (1);
1081
1082 if (unlikely(ret != 0 && srf->res.id != -1)) {
1083 write_lock(&dev_priv->resource_lock);
1084 list_add_tail(&srf->lru_head, &dev_priv->surface_lru);
1085 write_unlock(&dev_priv->resource_lock);
1086 }
1087
1088 return ret;
1089}
1090
1091
1092/**
1093 * vmw_surface_remove_from_lists - Remove surface resources from lookup lists
1094 *
1095 * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface
1096 *
1097 * As part of the resource destruction, remove the surface from any
1098 * lookup lists.
1099 */
1100static void vmw_surface_remove_from_lists(struct vmw_resource *res)
1101{
1102 struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
1103
1104 list_del_init(&srf->lru_head);
1105}
1106
1107int vmw_surface_init(struct vmw_private *dev_priv,
1108 struct vmw_surface *srf,
1109 void (*res_free) (struct vmw_resource *res))
1110{
1111 int ret;
1112 struct vmw_resource *res = &srf->res;
1113
1114 BUG_ON(res_free == NULL);
1115 INIT_LIST_HEAD(&srf->lru_head);
1116 ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
1117 VMW_RES_SURFACE, true, res_free,
1118 vmw_surface_remove_from_lists);
1119
1120 if (unlikely(ret != 0))
1121 res_free(res);
1122
1123 /*
1124 * The surface won't be visible to hardware until a
1125 * surface validate.
1126 */
1127
1128 (void) vmw_3d_resource_inc(dev_priv, false);
1129 vmw_resource_activate(res, vmw_hw_surface_destroy);
1130 return ret;
1131}
1132
1133static void vmw_user_surface_free(struct vmw_resource *res)
1134{
1135 struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
1136 struct vmw_user_surface *user_srf =
1137 container_of(srf, struct vmw_user_surface, srf);
1138 struct vmw_private *dev_priv = srf->res.dev_priv;
1139 uint32_t size = user_srf->size;
1140
1141 if (srf->backup)
1142 ttm_bo_unref(&srf->backup);
1143 kfree(srf->offsets);
1144 kfree(srf->sizes);
1145 kfree(srf->snooper.image);
1146 kfree(user_srf);
1147 ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
1148}
1149
1150/**
1151 * vmw_resource_unreserve - unreserve resources previously reserved for
1152 * command submission.
1153 *
1154 * @list_head: list of resources to unreserve.
1155 *
1156 * Currently only surfaces are considered, and unreserving a surface
1157 * means putting it back on the device's surface LRU list, so that it
1158 * can be evicted again if necessary.
1159 * This function therefore traverses the resource list, checks whether
1160 * each resource is a surface, and in that case puts it back on the
1161 * device's surface LRU list.
1162 */
1163void vmw_resource_unreserve(struct list_head *list)
1164{
1165 struct vmw_resource *res;
1166 struct vmw_surface *srf;
1167 rwlock_t *lock = NULL;
1168
1169 list_for_each_entry(res, list, validate_head) {
1170
1171 if (res->res_free != &vmw_surface_res_free &&
1172 res->res_free != &vmw_user_surface_free)
1173 continue;
1174
1175 if (unlikely(lock == NULL)) {
1176 lock = &res->dev_priv->resource_lock;
1177 write_lock(lock);
1178 }
1179
1180 srf = container_of(res, struct vmw_surface, res);
1181 list_del_init(&srf->lru_head);
1182 list_add_tail(&srf->lru_head, &res->dev_priv->surface_lru);
1183 }
1184
1185 if (lock != NULL)
1186 write_unlock(lock);
1187}
1188
1189/**
1190 * Helper function that looks up either a surface or a dmabuf.
1191 *
1192 * The pointers pointed at by out_surf and out_buf need to be NULL.
1193 */
1194int vmw_user_lookup_handle(struct vmw_private *dev_priv,
1195 struct ttm_object_file *tfile,
1196 uint32_t handle,
1197 struct vmw_surface **out_surf,
1198 struct vmw_dma_buffer **out_buf)
1199{
1200 int ret;
1201
1202 BUG_ON(*out_surf || *out_buf);
1203
1204 ret = vmw_user_surface_lookup_handle(dev_priv, tfile, handle, out_surf);
1205 if (!ret)
1206 return 0;
1207
1208 ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
1209 return ret;
1210}
1211
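/*
 * Illustrative caller sketch (added comment, not in the original file):
 * both output pointers must start out NULL and at most one of them is
 * filled in on success, e.g.
 *
 *	struct vmw_surface *surf = NULL;
 *	struct vmw_dma_buffer *buf = NULL;
 *	int ret;
 *
 *	ret = vmw_user_lookup_handle(dev_priv, tfile, handle, &surf, &buf);
 *	if (ret == 0 && surf != NULL)
 *		... the handle named a surface ...
 *	else if (ret == 0)
 *		... the handle named a dma buffer ...
 */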
1212
1213int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
1214 struct ttm_object_file *tfile,
1215 uint32_t handle, struct vmw_surface **out)
1216{
1217 struct vmw_resource *res;
1218 struct vmw_surface *srf;
1219 struct vmw_user_surface *user_srf;
1220 struct ttm_base_object *base;
1221 int ret = -EINVAL;
1222
1223 base = ttm_base_object_lookup(tfile, handle);
1224 if (unlikely(base == NULL))
1225 return -EINVAL;
1226
1227 if (unlikely(base->object_type != VMW_RES_SURFACE))
1228 goto out_bad_resource;
1229
1230 user_srf = container_of(base, struct vmw_user_surface, base);
1231 srf = &user_srf->srf;
1232 res = &srf->res;
1233
1234 read_lock(&dev_priv->resource_lock);
1235
1236 if (!res->avail || res->res_free != &vmw_user_surface_free) {
1237 read_unlock(&dev_priv->resource_lock);
1238 goto out_bad_resource;
1239 }
1240
1241 kref_get(&res->kref);
1242 read_unlock(&dev_priv->resource_lock);
1243
1244 *out = srf;
1245 ret = 0;
1246
1247out_bad_resource:
1248 ttm_base_object_unref(&base);
1249
1250 return ret;
1251}
1252
1253static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
1254{
1255 struct ttm_base_object *base = *p_base;
1256 struct vmw_user_surface *user_srf =
1257 container_of(base, struct vmw_user_surface, base);
1258 struct vmw_resource *res = &user_srf->srf.res;
1259
1260 *p_base = NULL;
1261 vmw_resource_unreference(&res);
1262}
1263
1264int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
1265 struct drm_file *file_priv)
1266{
1267 struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
1268 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1269
1270 return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
1271}
1272
1273int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
1274 struct drm_file *file_priv)
1275{
1276 struct vmw_private *dev_priv = vmw_priv(dev);
1277 struct vmw_user_surface *user_srf;
1278 struct vmw_surface *srf;
1279 struct vmw_resource *res;
1280 struct vmw_resource *tmp;
1281 union drm_vmw_surface_create_arg *arg =
1282 (union drm_vmw_surface_create_arg *)data;
1283 struct drm_vmw_surface_create_req *req = &arg->req;
1284 struct drm_vmw_surface_arg *rep = &arg->rep;
1285 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1286 struct drm_vmw_size __user *user_sizes;
1287 int ret;
1288 int i, j;
1289 uint32_t cur_bo_offset;
1290 struct drm_vmw_size *cur_size;
1291 struct vmw_surface_offset *cur_offset;
1292 uint32_t stride_bpp;
1293 uint32_t bpp;
1294 uint32_t num_sizes;
1295 uint32_t size;
1296 struct vmw_master *vmaster = vmw_master(file_priv->master);
1297
1298 if (unlikely(vmw_user_surface_size == 0))
1299 vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
1300 128;
1301
1302 num_sizes = 0;
1303 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
1304 num_sizes += req->mip_levels[i];
1305
1306 if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
1307 DRM_VMW_MAX_MIP_LEVELS)
1308 return -EINVAL;
1309
1310 size = vmw_user_surface_size + 128 +
1311 ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
1312 ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));
1313
1314
1315 ret = ttm_read_lock(&vmaster->lock, true);
1316 if (unlikely(ret != 0))
1317 return ret;
1318
1319 ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
1320 size, false, true);
1321 if (unlikely(ret != 0)) {
1322 if (ret != -ERESTARTSYS)
1323 DRM_ERROR("Out of graphics memory for surface"
1324 " creation.\n");
1325 goto out_unlock;
1326 }
1327
1328 user_srf = kmalloc(sizeof(*user_srf), GFP_KERNEL);
1329 if (unlikely(user_srf == NULL)) {
1330 ret = -ENOMEM;
1331 goto out_no_user_srf;
1332 }
1333
1334 srf = &user_srf->srf;
1335 res = &srf->res;
1336
1337 srf->flags = req->flags;
1338 srf->format = req->format;
1339 srf->scanout = req->scanout;
1340 srf->backup = NULL;
1341
1342 memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
1343 srf->num_sizes = num_sizes;
1344 user_srf->size = size;
1345
1346 srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
1347 if (unlikely(srf->sizes == NULL)) {
1348 ret = -ENOMEM;
1349 goto out_no_sizes;
1350 }
1351 srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
1352 GFP_KERNEL);
1353 if (unlikely(srf->offsets == NULL)) {
1354 ret = -ENOMEM;
1355 goto out_no_offsets;
1356 }
1357
1358 user_sizes = (struct drm_vmw_size __user *)(unsigned long)
1359 req->size_addr;
1360
1361 ret = copy_from_user(srf->sizes, user_sizes,
1362 srf->num_sizes * sizeof(*srf->sizes));
1363 if (unlikely(ret != 0)) {
1364 ret = -EFAULT;
1365 goto out_no_copy;
1366 }
1367
1368 cur_bo_offset = 0;
1369 cur_offset = srf->offsets;
1370 cur_size = srf->sizes;
1371
1372 bpp = vmw_sf_bpp[srf->format].bpp;
1373 stride_bpp = vmw_sf_bpp[srf->format].s_bpp;
1374
1375 for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
1376 for (j = 0; j < srf->mip_levels[i]; ++j) {
1377 uint32_t stride =
1378 (cur_size->width * stride_bpp + 7) >> 3;
1379
1380 cur_offset->face = i;
1381 cur_offset->mip = j;
1382 cur_offset->bo_offset = cur_bo_offset;
1383 cur_bo_offset += stride * cur_size->height *
1384 cur_size->depth * bpp / stride_bpp;
1385 ++cur_offset;
1386 ++cur_size;
1387 }
1388 }
1389 srf->backup_size = cur_bo_offset;
1390
1391 if (srf->scanout &&
1392 srf->num_sizes == 1 &&
1393 srf->sizes[0].width == 64 &&
1394 srf->sizes[0].height == 64 &&
1395 srf->format == SVGA3D_A8R8G8B8) {
1396
1397 /* allocate image area and clear it */
1398 srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
1399 if (!srf->snooper.image) {
1400 DRM_ERROR("Failed to allocate cursor_image\n");
1401 ret = -ENOMEM;
1402 goto out_no_copy;
1403 }
1404 } else {
1405 srf->snooper.image = NULL;
1406 }
1407 srf->snooper.crtc = NULL;
1408
1409 user_srf->base.shareable = false;
1410 user_srf->base.tfile = NULL;
1411
1412 /**
1413 * From this point, the generic resource management functions
1414 * destroy the object on failure.
1415 */
1416
1417 ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
1418 if (unlikely(ret != 0))
1419 goto out_unlock;
1420
1421 tmp = vmw_resource_reference(&srf->res);
1422 ret = ttm_base_object_init(tfile, &user_srf->base,
1423 req->shareable, VMW_RES_SURFACE,
1424 &vmw_user_surface_base_release, NULL);
1425
1426 if (unlikely(ret != 0)) {
1427 vmw_resource_unreference(&tmp);
1428 vmw_resource_unreference(&res);
1429 goto out_unlock;
1430 }
1431
1432 rep->sid = user_srf->base.hash.key;
1433 if (rep->sid == SVGA3D_INVALID_ID)
1434 DRM_ERROR("Created bad Surface ID.\n");
1435
1436 vmw_resource_unreference(&res);
1437
1438 ttm_read_unlock(&vmaster->lock);
1439 return 0;
1440out_no_copy:
1441 kfree(srf->offsets);
1442out_no_offsets:
1443 kfree(srf->sizes);
1444out_no_sizes:
1445 kfree(user_srf);
1446out_no_user_srf:
1447 ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
1448out_unlock:
1449 ttm_read_unlock(&vmaster->lock);
1450 return ret;
1451}
1452
1453int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
1454 struct drm_file *file_priv)
1455{
1456 union drm_vmw_surface_reference_arg *arg =
1457 (union drm_vmw_surface_reference_arg *)data;
1458 struct drm_vmw_surface_arg *req = &arg->req;
1459 struct drm_vmw_surface_create_req *rep = &arg->rep;
1460 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1461 struct vmw_surface *srf;
1462 struct vmw_user_surface *user_srf;
1463 struct drm_vmw_size __user *user_sizes;
1464 struct ttm_base_object *base;
1465 int ret = -EINVAL;
1466
1467 base = ttm_base_object_lookup(tfile, req->sid);
1468 if (unlikely(base == NULL)) {
1469 DRM_ERROR("Could not find surface to reference.\n");
1470 return -EINVAL;
1471 }
1472
1473 if (unlikely(base->object_type != VMW_RES_SURFACE))
1474 goto out_bad_resource;
1475
1476 user_srf = container_of(base, struct vmw_user_surface, base);
1477 srf = &user_srf->srf;
1478
1479 ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
1480 if (unlikely(ret != 0)) {
1481 DRM_ERROR("Could not add a reference to a surface.\n");
1482 goto out_no_reference;
1483 }
1484
1485 rep->flags = srf->flags;
1486 rep->format = srf->format;
1487 memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
1488 user_sizes = (struct drm_vmw_size __user *)(unsigned long)
1489 rep->size_addr;
1490
1491 if (user_sizes)
1492 ret = copy_to_user(user_sizes, srf->sizes,
1493 srf->num_sizes * sizeof(*srf->sizes));
1494 if (unlikely(ret != 0)) {
1495 DRM_ERROR("copy_to_user failed %p %u\n",
1496 user_sizes, srf->num_sizes);
1497 ret = -EFAULT;
1498 }
1499out_bad_resource:
1500out_no_reference:
1501 ttm_base_object_unref(&base);
1502
1503 return ret;
1504}
1505
1506int vmw_surface_check(struct vmw_private *dev_priv,
1507 struct ttm_object_file *tfile,
1508 uint32_t handle, int *id)
1509{
1510 struct ttm_base_object *base;
1511 struct vmw_user_surface *user_srf;
1512
1513 int ret = -EPERM;
1514
1515 base = ttm_base_object_lookup(tfile, handle);
1516 if (unlikely(base == NULL))
1517 return -EINVAL;
1518
1519 if (unlikely(base->object_type != VMW_RES_SURFACE))
1520 goto out_bad_surface;
1521
1522 user_srf = container_of(base, struct vmw_user_surface, base);
1523 *id = user_srf->srf.res.id;
1524 ret = 0;
1525
1526out_bad_surface:
1527 /**
1528 * FIXME: May deadlock here when called from the
1529 * command parsing code.
1530 */
1531
1532 ttm_base_object_unref(&base);
1533 return ret;
1534}
1535
1536/**
1537 * Buffer management.
1538 */
1539void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
1540{
1541 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
1542
1543 kfree(vmw_bo);
1544}
1545
1546int vmw_dmabuf_init(struct vmw_private *dev_priv,
1547 struct vmw_dma_buffer *vmw_bo,
1548 size_t size, struct ttm_placement *placement,
1549 bool interruptible,
1550 void (*bo_free) (struct ttm_buffer_object *bo))
1551{
1552 struct ttm_bo_device *bdev = &dev_priv->bdev;
1553 size_t acc_size;
1554 int ret;
1555
1556 BUG_ON(!bo_free);
1557
1558 acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer));
1559 memset(vmw_bo, 0, sizeof(*vmw_bo));
1560
1561 INIT_LIST_HEAD(&vmw_bo->validate_list);
1562
1563 ret = ttm_bo_init(bdev, &vmw_bo->base, size,
1564 ttm_bo_type_device, placement,
1565 0, interruptible,
1566 NULL, acc_size, NULL, bo_free);
1567 return ret;
1568}
1569
1570static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
1571{
1572 struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
1573
1574 kfree(vmw_user_bo);
1575}
1576
1577static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
1578{
1579 struct vmw_user_dma_buffer *vmw_user_bo;
1580 struct ttm_base_object *base = *p_base;
1581 struct ttm_buffer_object *bo;
1582
1583 *p_base = NULL;
1584
1585 if (unlikely(base == NULL))
1586 return;
1587
1588 vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
1589 bo = &vmw_user_bo->dma.base;
1590 ttm_bo_unref(&bo);
1591}
1592
1593int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
1594 struct drm_file *file_priv)
1595{
1596 struct vmw_private *dev_priv = vmw_priv(dev);
1597 union drm_vmw_alloc_dmabuf_arg *arg =
1598 (union drm_vmw_alloc_dmabuf_arg *)data;
1599 struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
1600 struct drm_vmw_dmabuf_rep *rep = &arg->rep;
1601 struct vmw_user_dma_buffer *vmw_user_bo;
1602 struct ttm_buffer_object *tmp;
1603 struct vmw_master *vmaster = vmw_master(file_priv->master);
1604 int ret;
1605
1606 vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
1607 if (unlikely(vmw_user_bo == NULL))
1608 return -ENOMEM;
1609
1610 ret = ttm_read_lock(&vmaster->lock, true);
1611 if (unlikely(ret != 0)) {
1612 kfree(vmw_user_bo);
1613 return ret;
1614 }
1615
1616 ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
1617 &vmw_vram_sys_placement, true,
1618 &vmw_user_dmabuf_destroy);
1619 if (unlikely(ret != 0))
1620 goto out_no_dmabuf;
1621
1622 tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
1623 ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
1624 &vmw_user_bo->base,
1625 false,
1626 ttm_buffer_type,
1627 &vmw_user_dmabuf_release, NULL);
1628 if (unlikely(ret != 0))
1629 goto out_no_base_object;
1630 else {
1631 rep->handle = vmw_user_bo->base.hash.key;
1632 rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
1633 rep->cur_gmr_id = vmw_user_bo->base.hash.key;
1634 rep->cur_gmr_offset = 0;
1635 }
1636
1637out_no_base_object:
1638 ttm_bo_unref(&tmp);
1639out_no_dmabuf:
1640 ttm_read_unlock(&vmaster->lock);
1641
1642 return ret;
1643}
1644
1645int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
1646 struct drm_file *file_priv)
1647{
1648 struct drm_vmw_unref_dmabuf_arg *arg =
1649 (struct drm_vmw_unref_dmabuf_arg *)data;
1650
1651 return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
1652 arg->handle,
1653 TTM_REF_USAGE);
1654}
1655
1656uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
1657 uint32_t cur_validate_node)
1658{
1659 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
1660
1661 if (likely(vmw_bo->on_validate_list))
1662 return vmw_bo->cur_validate_node;
1663
1664 vmw_bo->cur_validate_node = cur_validate_node;
1665 vmw_bo->on_validate_list = true;
1666
1667 return cur_validate_node;
1668}
1669
1670void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
1671{
1672 struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
1673
1674 vmw_bo->on_validate_list = false;
1675}
1676
1677int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
1678 uint32_t handle, struct vmw_dma_buffer **out)
1679{
1680 struct vmw_user_dma_buffer *vmw_user_bo;
1681 struct ttm_base_object *base;
1682
1683 base = ttm_base_object_lookup(tfile, handle);
1684 if (unlikely(base == NULL)) {
1685 printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
1686 (unsigned long)handle);
1687 return -ESRCH;
1688 }
1689
1690 if (unlikely(base->object_type != ttm_buffer_type)) {
1691 ttm_base_object_unref(&base);
1692 printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
1693 (unsigned long)handle);
1694 return -EINVAL;
1695 }
1696
1697 vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
1698 (void)ttm_bo_reference(&vmw_user_bo->dma.base);
1699 ttm_base_object_unref(&base);
1700 *out = &vmw_user_bo->dma;
1701
1702 return 0;
1703}
1704
1705/*
1706 * Stream management
1707 */
1708
1709static void vmw_stream_destroy(struct vmw_resource *res)
1710{
1711 struct vmw_private *dev_priv = res->dev_priv;
1712 struct vmw_stream *stream;
1713 int ret;
1714
1715 DRM_INFO("%s: unref\n", __func__);
1716 stream = container_of(res, struct vmw_stream, res);
1717
1718 ret = vmw_overlay_unref(dev_priv, stream->stream_id);
1719 WARN_ON(ret != 0);
1720}
1721
1722static int vmw_stream_init(struct vmw_private *dev_priv,
1723 struct vmw_stream *stream,
1724 void (*res_free) (struct vmw_resource *res))
1725{
1726 struct vmw_resource *res = &stream->res;
1727 int ret;
1728
1729 ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
1730 VMW_RES_STREAM, false, res_free, NULL);
1731
1732 if (unlikely(ret != 0)) {
1733 if (res_free == NULL)
1734 kfree(stream);
1735 else
1736 res_free(&stream->res);
1737 return ret;
1738 }
1739
1740 ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
1741 if (ret) {
1742 vmw_resource_unreference(&res);
1743 return ret;
1744 }
1745
1746 DRM_INFO("%s: claimed\n", __func__);
1747
1748 vmw_resource_activate(&stream->res, vmw_stream_destroy);
1749 return 0;
1750}
1751
1752/**
1753 * User-space stream management:
1754 */
1755
1756static void vmw_user_stream_free(struct vmw_resource *res)
1757{
1758 struct vmw_user_stream *stream =
1759 container_of(res, struct vmw_user_stream, stream.res);
1760 struct vmw_private *dev_priv = res->dev_priv;
1761
1762 kfree(stream);
1763 ttm_mem_global_free(vmw_mem_glob(dev_priv),
1764 vmw_user_stream_size);
1765}
1766
1767/**
1768 * This function is called when user space has no more references on the
1769 * base object. It releases the base-object's reference on the resource object.
1770 */
1771
1772static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
1773{
1774 struct ttm_base_object *base = *p_base;
1775 struct vmw_user_stream *stream =
1776 container_of(base, struct vmw_user_stream, base);
1777 struct vmw_resource *res = &stream->stream.res;
1778
1779 *p_base = NULL;
1780 vmw_resource_unreference(&res);
1781}
1782
1783int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
1784 struct drm_file *file_priv)
1785{
1786 struct vmw_private *dev_priv = vmw_priv(dev);
1787 struct vmw_resource *res;
1788 struct vmw_user_stream *stream;
1789 struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
1790 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1791 int ret = 0;
1792
1793 res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, arg->stream_id);
1794 if (unlikely(res == NULL))
1795 return -EINVAL;
1796
1797 if (res->res_free != &vmw_user_stream_free) {
1798 ret = -EINVAL;
1799 goto out;
1800 }
1801
1802 stream = container_of(res, struct vmw_user_stream, stream.res);
1803 if (stream->base.tfile != tfile) {
1804 ret = -EINVAL;
1805 goto out;
1806 }
1807
1808 ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
1809out:
1810 vmw_resource_unreference(&res);
1811 return ret;
1812}
1813
1814int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
1815 struct drm_file *file_priv)
1816{
1817 struct vmw_private *dev_priv = vmw_priv(dev);
1818 struct vmw_user_stream *stream;
1819 struct vmw_resource *res;
1820 struct vmw_resource *tmp;
1821 struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
1822 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1823 struct vmw_master *vmaster = vmw_master(file_priv->master);
1824 int ret;
1825
1826 /*
1827 * Approximate idr memory usage with 128 bytes. It will be limited
1828 * by the maximum number of streams anyway.
1829 */
1830
1831 if (unlikely(vmw_user_stream_size == 0))
1832 vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;
1833
1834 ret = ttm_read_lock(&vmaster->lock, true);
1835 if (unlikely(ret != 0))
1836 return ret;
1837
1838 ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
1839 vmw_user_stream_size,
1840 false, true);
1841 if (unlikely(ret != 0)) {
1842 if (ret != -ERESTARTSYS)
1843 DRM_ERROR("Out of graphics memory for stream"
1844 " creation.\n");
1845 goto out_unlock;
1846 }
1847
1848
1849 stream = kmalloc(sizeof(*stream), GFP_KERNEL);
1850 if (unlikely(stream == NULL)) {
1851 ttm_mem_global_free(vmw_mem_glob(dev_priv),
1852 vmw_user_stream_size);
1853 ret = -ENOMEM;
1854 goto out_unlock;
1855 }
1856
1857 res = &stream->stream.res;
1858 stream->base.shareable = false;
1859 stream->base.tfile = NULL;
1860
1861 /*
1862 * From here on, the destructor takes over resource freeing.
1863 */
1864
1865 ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
1866 if (unlikely(ret != 0))
1867 goto out_unlock;
1868
1869 tmp = vmw_resource_reference(res);
1870 ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
1871 &vmw_user_stream_base_release, NULL);
1872
1873 if (unlikely(ret != 0)) {
1874 vmw_resource_unreference(&tmp);
1875 goto out_err;
1876 }
1877
1878 arg->stream_id = res->id;
1879out_err:
1880 vmw_resource_unreference(&res);
1881out_unlock:
1882 ttm_read_unlock(&vmaster->lock);
1883 return ret;
1884}
1885
1886int vmw_user_stream_lookup(struct vmw_private *dev_priv,
1887 struct ttm_object_file *tfile,
1888 uint32_t *inout_id, struct vmw_resource **out)
1889{
1890 struct vmw_user_stream *stream;
1891 struct vmw_resource *res;
1892 int ret;
1893
1894 res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id);
1895 if (unlikely(res == NULL))
1896 return -EINVAL;
1897
1898 if (res->res_free != &vmw_user_stream_free) {
1899 ret = -EINVAL;
1900 goto err_ref;
1901 }
1902
1903 stream = container_of(res, struct vmw_user_stream, stream.res);
1904 if (stream->base.tfile != tfile) {
1905 ret = -EPERM;
1906 goto err_ref;
1907 }
1908
1909 *inout_id = stream->stream.stream_id;
1910 *out = res;
1911 return 0;
1912err_ref:
1913 vmw_resource_unreference(&res);
1914 return ret;
1915}
1916
1917
1918int vmw_dumb_create(struct drm_file *file_priv,
1919 struct drm_device *dev,
1920 struct drm_mode_create_dumb *args)
1921{
1922 struct vmw_private *dev_priv = vmw_priv(dev);
1923 struct vmw_master *vmaster = vmw_master(file_priv->master);
1924 struct vmw_user_dma_buffer *vmw_user_bo;
1925 struct ttm_buffer_object *tmp;
1926 int ret;
1927
1928 args->pitch = args->width * ((args->bpp + 7) / 8);
1929 args->size = args->pitch * args->height;
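/*
 * Illustrative arithmetic (comment added for clarity, not in the
 * original file): a 1024x768 dumb buffer at 32 bpp gives
 * pitch = 1024 * ((32 + 7) / 8) = 4096 bytes and
 * size  = 4096 * 768 = 3145728 bytes.
 */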
1930
1931 vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
1932 if (vmw_user_bo == NULL)
1933 return -ENOMEM;
1934
1935 ret = ttm_read_lock(&vmaster->lock, true);
1936 if (ret != 0) {
1937 kfree(vmw_user_bo);
1938 return ret;
1939 }
1940
1941 ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, args->size,
1942 &vmw_vram_sys_placement, true,
1943 &vmw_user_dmabuf_destroy);
1944 if (ret != 0)
1945 goto out_no_dmabuf;
1946
1947 tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
1948 ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
1949 &vmw_user_bo->base,
1950 false,
1951 ttm_buffer_type,
1952 &vmw_user_dmabuf_release, NULL);
1953 if (unlikely(ret != 0))
1954 goto out_no_base_object;
1955
1956 args->handle = vmw_user_bo->base.hash.key;
1957
1958out_no_base_object:
1959 ttm_bo_unref(&tmp);
1960out_no_dmabuf:
1961 ttm_read_unlock(&vmaster->lock);
1962 return ret;
1963}
1964
1965int vmw_dumb_map_offset(struct drm_file *file_priv,
1966 struct drm_device *dev, uint32_t handle,
1967 uint64_t *offset)
1968{
1969 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
1970 struct vmw_dma_buffer *out_buf;
1971 int ret;
1972
1973 ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf);
1974 if (ret != 0)
1975 return -EINVAL;
1976
1977 *offset = out_buf->base.addr_space_offset;
1978 vmw_dmabuf_unreference(&out_buf);
1979 return 0;
1980}
1981
1982int vmw_dumb_destroy(struct drm_file *file_priv,
1983 struct drm_device *dev,
1984 uint32_t handle)
1985{
1986 return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
1987 handle, TTM_REF_USAGE);
1988}