drm/vmwgfx: Add MOB management
[deliverable/linux.git] drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_page_alloc.h>

static uint32_t vram_placement_flags = TTM_PL_FLAG_VRAM |
	TTM_PL_FLAG_CACHED;

static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM |
	TTM_PL_FLAG_CACHED |
	TTM_PL_FLAG_NO_EVICT;

static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM |
	TTM_PL_FLAG_CACHED;

static uint32_t sys_ne_placement_flags = TTM_PL_FLAG_SYSTEM |
	TTM_PL_FLAG_CACHED |
	TTM_PL_FLAG_NO_EVICT;

static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR |
	TTM_PL_FLAG_CACHED;

static uint32_t gmr_ne_placement_flags = VMW_PL_FLAG_GMR |
	TTM_PL_FLAG_CACHED |
	TTM_PL_FLAG_NO_EVICT;

struct ttm_placement vmw_vram_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_placement_flags
};

static uint32_t vram_gmr_placement_flags[] = {
	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
	VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
};

static uint32_t gmr_vram_placement_flags[] = {
	VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED,
	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
};

struct ttm_placement vmw_vram_gmr_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 2,
	.placement = vram_gmr_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &gmr_placement_flags
};

static uint32_t vram_gmr_ne_placement_flags[] = {
	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT,
	VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
};

struct ttm_placement vmw_vram_gmr_ne_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 2,
	.placement = vram_gmr_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &gmr_ne_placement_flags
};

struct ttm_placement vmw_vram_sys_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 1,
	.placement = &vram_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_vram_ne_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 1,
	.placement = &vram_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &vram_ne_placement_flags
};

struct ttm_placement vmw_sys_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 1,
	.placement = &sys_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_sys_ne_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 1,
	.placement = &sys_ne_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_ne_placement_flags
};

static uint32_t evictable_placement_flags[] = {
	TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
	VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
};

struct ttm_placement vmw_evictable_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 3,
	.placement = evictable_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &sys_placement_flags
};

struct ttm_placement vmw_srf_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 1,
	.num_busy_placement = 2,
	.placement = &gmr_placement_flags,
	.busy_placement = gmr_vram_placement_flags
};

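/**
 * struct vmw_ttm_tt - vmwgfx per-TTM private data
 *
 * @dma_ttm: The DMA-aware TTM structure this backend wraps.
 * @dev_priv: Pointer to the device private structure.
 * @gmr_id: GMR slot this buffer is bound to, valid while bound.
 * @sgt: Scatter-gather table used in the DMA mapping modes.
 * @vsgt: vmwgfx view of the page list and its DMA addresses.
 * @sg_alloc_size: Size accounted to the TTM memory global for @sgt.
 * @mapped: Whether a device mapping is currently set up.
 */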
struct vmw_ttm_tt {
	struct ttm_dma_tt dma_ttm;
	struct vmw_private *dev_priv;
	int gmr_id;
	struct sg_table sgt;
	struct vmw_sg_table vsgt;
	uint64_t sg_alloc_size;
	bool mapped;
};

const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);

/**
 * Helper functions to advance a struct vmw_piter iterator.
 *
 * @viter: Pointer to the iterator.
 *
 * These functions return false if past the end of the list,
 * true otherwise. Functions are selected depending on the current
 * DMA mapping mode.
 */
static bool __vmw_piter_non_sg_next(struct vmw_piter *viter)
{
	return ++(viter->i) < viter->num_pages;
}

static bool __vmw_piter_sg_next(struct vmw_piter *viter)
{
	return __sg_page_iter_next(&viter->iter);
}


/**
 * Helper functions to return a pointer to the current page.
 *
 * @viter: Pointer to the iterator
 *
 * These functions return a pointer to the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter)
{
	return viter->pages[viter->i];
}

static struct page *__vmw_piter_sg_page(struct vmw_piter *viter)
{
	return sg_page_iter_page(&viter->iter);
}


/**
 * Helper functions to return the DMA address of the current page.
 *
 * @viter: Pointer to the iterator
 *
 * These functions return the DMA address of the page currently
 * pointed to by @viter. Functions are selected depending on the
 * current mapping mode.
 */
static dma_addr_t __vmw_piter_phys_addr(struct vmw_piter *viter)
{
	return page_to_phys(viter->pages[viter->i]);
}

static dma_addr_t __vmw_piter_dma_addr(struct vmw_piter *viter)
{
	return viter->addrs[viter->i];
}

static dma_addr_t __vmw_piter_sg_addr(struct vmw_piter *viter)
{
	return sg_page_iter_dma_address(&viter->iter);
}


/**
 * vmw_piter_start - Initialize a struct vmw_piter.
 *
 * @viter: Pointer to the iterator to initialize
 * @vsgt: Pointer to a struct vmw_sg_table to initialize from
 * @p_offset: Page offset into @vsgt at which to start iterating
 *
 * Note that we're following the convention of __sg_page_iter_start, so that
 * the iterator doesn't point to a valid page after initialization; it has
 * to be advanced one step first.
 */
void vmw_piter_start(struct vmw_piter *viter, const struct vmw_sg_table *vsgt,
		     unsigned long p_offset)
{
	viter->i = p_offset - 1;
	viter->num_pages = vsgt->num_pages;
	switch (vsgt->mode) {
	case vmw_dma_phys:
		viter->next = &__vmw_piter_non_sg_next;
		viter->dma_address = &__vmw_piter_phys_addr;
		viter->page = &__vmw_piter_non_sg_page;
		viter->pages = vsgt->pages;
		break;
	case vmw_dma_alloc_coherent:
		viter->next = &__vmw_piter_non_sg_next;
		viter->dma_address = &__vmw_piter_dma_addr;
		viter->page = &__vmw_piter_non_sg_page;
		viter->addrs = vsgt->addrs;
		break;
	case vmw_dma_map_populate:
	case vmw_dma_map_bind:
		viter->next = &__vmw_piter_sg_next;
		viter->dma_address = &__vmw_piter_sg_addr;
		viter->page = &__vmw_piter_sg_page;
		__sg_page_iter_start(&viter->iter, vsgt->sgt->sgl,
				     vsgt->sgt->orig_nents, p_offset);
		break;
	default:
		BUG();
	}
}
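
/*
 * Typical use of the iterator (illustrative sketch; the same pattern is
 * used in vmw_ttm_map_dma() below):
 *
 *	struct vmw_piter iter;
 *
 *	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
 *		dma_addr_t addr = vmw_piter_dma_addr(&iter);
 *
 *		(use addr for the current page)
 *	}
 */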

/**
 * vmw_ttm_unmap_from_dma - unmap device addresses previously mapped for
 * TTM pages
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Used to free dma mappings previously mapped by vmw_ttm_map_for_dma.
 */
static void vmw_ttm_unmap_from_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct device *dev = vmw_tt->dev_priv->dev->dev;

	dma_unmap_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.nents,
		     DMA_BIDIRECTIONAL);
	vmw_tt->sgt.nents = vmw_tt->sgt.orig_nents;
}

/**
 * vmw_ttm_map_for_dma - map TTM pages to get device addresses
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * This function is used to get device addresses from the kernel DMA layer.
 * However, it's violating the DMA API in that when this operation has been
 * performed, it's illegal for the CPU to write to the pages without first
 * unmapping the DMA mappings, or calling dma_sync_sg_for_cpu(). It is
 * therefore only legal to call this function if we know that the function
 * dma_sync_sg_for_cpu() is a NOP, and dma_sync_sg_for_device() is at most
 * a CPU write buffer flush.
 */
static int vmw_ttm_map_for_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct device *dev = vmw_tt->dev_priv->dev->dev;
	int ret;

	ret = dma_map_sg(dev, vmw_tt->sgt.sgl, vmw_tt->sgt.orig_nents,
			 DMA_BIDIRECTIONAL);
	if (unlikely(ret == 0))
		return -ENOMEM;

	vmw_tt->sgt.nents = ret;

	return 0;
}

/**
 * vmw_ttm_map_dma - Make sure TTM pages are visible to the device
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Select the correct mapping function for the current DMA mapping mode and
 * make sure the TTM pages are visible to the device. Allocate storage for
 * the device mappings. If a mapping has already been performed, indicated
 * by the storage pointer being non-NULL, the function returns success.
 */
static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
	struct vmw_sg_table *vsgt = &vmw_tt->vsgt;
	struct vmw_piter iter;
	dma_addr_t old;
	int ret = 0;
	static size_t sgl_size;
	static size_t sgt_size;

	if (vmw_tt->mapped)
		return 0;

	vsgt->mode = dev_priv->map_mode;
	vsgt->pages = vmw_tt->dma_ttm.ttm.pages;
	vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages;
	vsgt->addrs = vmw_tt->dma_ttm.dma_address;
	vsgt->sgt = &vmw_tt->sgt;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		if (unlikely(!sgl_size)) {
			sgl_size = ttm_round_pot(sizeof(struct scatterlist));
			sgt_size = ttm_round_pot(sizeof(struct sg_table));
		}
		vmw_tt->sg_alloc_size = sgt_size + sgl_size * vsgt->num_pages;
		ret = ttm_mem_global_alloc(glob, vmw_tt->sg_alloc_size, false,
					   true);
		if (unlikely(ret != 0))
			return ret;

		ret = sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages,
						vsgt->num_pages, 0,
						(unsigned long)
						vsgt->num_pages << PAGE_SHIFT,
						GFP_KERNEL);
		if (unlikely(ret != 0))
			goto out_sg_alloc_fail;

		if (vsgt->num_pages > vmw_tt->sgt.nents) {
			uint64_t over_alloc =
				sgl_size * (vsgt->num_pages -
					    vmw_tt->sgt.nents);

			ttm_mem_global_free(glob, over_alloc);
			vmw_tt->sg_alloc_size -= over_alloc;
		}

		ret = vmw_ttm_map_for_dma(vmw_tt);
		if (unlikely(ret != 0))
			goto out_map_fail;

		break;
	default:
		break;
	}

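	/*
	 * Count the number of contiguous DMA regions in the resulting
	 * address list: a new region starts whenever the current DMA
	 * address does not directly follow the previous one.
	 */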
	old = ~((dma_addr_t) 0);
	vmw_tt->vsgt.num_regions = 0;
	for (vmw_piter_start(&iter, vsgt, 0); vmw_piter_next(&iter);) {
		dma_addr_t cur = vmw_piter_dma_addr(&iter);

		if (cur != old + PAGE_SIZE)
			vmw_tt->vsgt.num_regions++;
		old = cur;
	}

	vmw_tt->mapped = true;
	return 0;

out_map_fail:
	sg_free_table(vmw_tt->vsgt.sgt);
	vmw_tt->vsgt.sgt = NULL;
out_sg_alloc_fail:
	ttm_mem_global_free(glob, vmw_tt->sg_alloc_size);
	return ret;
}

/**
 * vmw_ttm_unmap_dma - Tear down any TTM page device mappings
 *
 * @vmw_tt: Pointer to a struct vmw_ttm_tt
 *
 * Tear down any previously set up device DMA mappings and free
 * any storage space allocated for them. If there are no mappings set up,
 * this function is a NOP.
 */
static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt)
{
	struct vmw_private *dev_priv = vmw_tt->dev_priv;

	if (!vmw_tt->vsgt.sgt)
		return;

	switch (dev_priv->map_mode) {
	case vmw_dma_map_bind:
	case vmw_dma_map_populate:
		vmw_ttm_unmap_from_dma(vmw_tt);
		sg_free_table(vmw_tt->vsgt.sgt);
		vmw_tt->vsgt.sgt = NULL;
		ttm_mem_global_free(vmw_mem_glob(dev_priv),
				    vmw_tt->sg_alloc_size);
		break;
	default:
		break;
	}
	vmw_tt->mapped = false;
}

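/**
 * vmw_ttm_bind - Bind a TTM to a GMR slot
 *
 * @ttm: Pointer to the TTM to bind.
 * @bo_mem: Memory region whose start gives the GMR id to bind to.
 *
 * Makes sure device mappings are set up and then binds the pages to the
 * GMR id given by @bo_mem->start.
 */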
static int vmw_ttm_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
	int ret;

	ret = vmw_ttm_map_dma(vmw_be);
	if (unlikely(ret != 0))
		return ret;

	vmw_be->gmr_id = bo_mem->start;

	return vmw_gmr_bind(vmw_be->dev_priv, &vmw_be->vsgt,
			    ttm->num_pages, vmw_be->gmr_id);
}

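/**
 * vmw_ttm_unbind - Unbind a TTM from its GMR slot
 *
 * @ttm: Pointer to the TTM to unbind.
 *
 * Releases the GMR binding and, in the vmw_dma_map_bind mode, also tears
 * down the device DMA mappings.
 */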
static int vmw_ttm_unbind(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);

	if (vmw_be->dev_priv->map_mode == vmw_dma_map_bind)
		vmw_ttm_unmap_dma(vmw_be);

	return 0;
}

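/**
 * vmw_ttm_destroy - Free a struct vmw_ttm_tt
 *
 * @ttm: Pointer to the TTM to destroy.
 *
 * Tears down any remaining DMA mappings, finalizes the TTM with the
 * variant matching the allocation mode and frees the backend structure.
 */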
static void vmw_ttm_destroy(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_be =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);

	vmw_ttm_unmap_dma(vmw_be);
	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
		ttm_dma_tt_fini(&vmw_be->dma_ttm);
	else
		ttm_tt_fini(ttm);
	kfree(vmw_be);
}

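/**
 * vmw_ttm_populate - Allocate backing pages for a TTM
 *
 * @ttm: Pointer to the TTM to populate.
 *
 * In the vmw_dma_alloc_coherent mode the pages come from the coherent TTM
 * DMA pool and the DMA address array is accounted to the TTM memory
 * global; otherwise the ordinary page pool is used.
 */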
static int vmw_ttm_populate(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_tt =
		container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
		size_t size =
			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
		ret = ttm_mem_global_alloc(glob, size, false, true);
		if (unlikely(ret != 0))
			return ret;

		ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
		if (unlikely(ret != 0))
			ttm_mem_global_free(glob, size);
	} else
		ret = ttm_pool_populate(ttm);

	return ret;
}

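/**
 * vmw_ttm_unpopulate - Free the backing pages of a TTM
 *
 * @ttm: Pointer to the TTM to unpopulate.
 *
 * Tears down any DMA mappings first, then releases the pages to the pool
 * they were allocated from and returns the accounted memory to the TTM
 * memory global where applicable.
 */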
static void vmw_ttm_unpopulate(struct ttm_tt *ttm)
{
	struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt,
						 dma_ttm.ttm);
	struct vmw_private *dev_priv = vmw_tt->dev_priv;
	struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);

	vmw_ttm_unmap_dma(vmw_tt);
	if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
		size_t size =
			ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));

		ttm_dma_unpopulate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
		ttm_mem_global_free(glob, size);
	} else
		ttm_pool_unpopulate(ttm);
}

static struct ttm_backend_func vmw_ttm_func = {
	.bind = vmw_ttm_bind,
	.unbind = vmw_ttm_unbind,
	.destroy = vmw_ttm_destroy,
};

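/**
 * vmw_ttm_tt_create - Allocate and initialize a struct vmw_ttm_tt
 *
 * @bdev: Pointer to the buffer object device.
 * @size: Size of the underlying buffer object.
 * @page_flags: TTM page flags.
 * @dummy_read_page: TTM dummy read page, passed through to the TTM init
 * functions.
 *
 * Initializes either the DMA-aware or the plain TTM variant depending on
 * the device's DMA mapping mode. Returns NULL on failure.
 */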
static struct ttm_tt *vmw_ttm_tt_create(struct ttm_bo_device *bdev,
					unsigned long size, uint32_t page_flags,
					struct page *dummy_read_page)
{
	struct vmw_ttm_tt *vmw_be;
	int ret;

	vmw_be = kzalloc(sizeof(*vmw_be), GFP_KERNEL);
	if (!vmw_be)
		return NULL;

	vmw_be->dma_ttm.ttm.func = &vmw_ttm_func;
	vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);

	if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent)
		ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bdev, size, page_flags,
				      dummy_read_page);
	else
		ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bdev, size, page_flags,
				  dummy_read_page);
	if (unlikely(ret != 0))
		goto out_no_init;

	return &vmw_be->dma_ttm.ttm;
out_no_init:
	kfree(vmw_be);
	return NULL;
}

static int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}

static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			     struct ttm_mem_type_manager *man)
{
	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */

		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = 0;
		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case VMW_PL_GMR:
		/*
		 * "Guest Memory Regions" is an aperture-like feature with
		 * one slot per bo. There is an upper limit on the number of
		 * slots as well as on the bo size.
		 */
		man->func = &vmw_gmrid_manager_func;
		man->gpu_offset = 0;
		man->flags = TTM_MEMTYPE_FLAG_CMA | TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_CACHED;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

static void vmw_evict_flags(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement)
{
	*placement = vmw_sys_placement;
}

static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct ttm_object_file *tfile =
		vmw_fpriv((struct drm_file *)filp->private_data)->tfile;

	return vmw_user_dmabuf_verify_access(bo, tfile);
}

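/**
 * vmw_ttm_io_mem_reserve - Set up iomem bus information for a memory region
 *
 * @bdev: Pointer to the buffer object device.
 * @mem: The memory region to set up.
 *
 * System and GMR memory need no iomem setup. For VRAM, the bus address is
 * computed from the VRAM start and the region's page offset.
 */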
static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev);

	mem->bus.addr = NULL;
	mem->bus.is_iomem = false;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
	case VMW_PL_GMR:
		return 0;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = dev_priv->vram_start;
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void vmw_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static int vmw_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	return 0;
}

/**
 * FIXME: We're using the old vmware polling method to sync.
 * Do this with fences instead.
 */

static void *vmw_sync_obj_ref(void *sync_obj)
{

	return (void *)
		vmw_fence_obj_reference((struct vmw_fence_obj *) sync_obj);
}

static void vmw_sync_obj_unref(void **sync_obj)
{
	vmw_fence_obj_unreference((struct vmw_fence_obj **) sync_obj);
}

static int vmw_sync_obj_flush(void *sync_obj)
{
	vmw_fence_obj_flush((struct vmw_fence_obj *) sync_obj);
	return 0;
}

static bool vmw_sync_obj_signaled(void *sync_obj)
{
	return vmw_fence_obj_signaled((struct vmw_fence_obj *) sync_obj,
				      DRM_VMW_FENCE_FLAG_EXEC);

}

static int vmw_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
{
	return vmw_fence_obj_wait((struct vmw_fence_obj *) sync_obj,
				  DRM_VMW_FENCE_FLAG_EXEC,
				  lazy, interruptible,
				  VMW_FENCE_WAIT_TIMEOUT);
}

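/*
 * TTM buffer object driver callbacks for vmwgfx. No accelerated move hook
 * is provided (.move = NULL), leaving buffer moves to TTM's generic path,
 * and synchronization is done through vmwgfx fence objects via the
 * sync_obj_* hooks above.
 */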
struct ttm_bo_driver vmw_bo_driver = {
	.ttm_tt_create = &vmw_ttm_tt_create,
	.ttm_tt_populate = &vmw_ttm_populate,
	.ttm_tt_unpopulate = &vmw_ttm_unpopulate,
	.invalidate_caches = vmw_invalidate_caches,
	.init_mem_type = vmw_init_mem_type,
	.evict_flags = vmw_evict_flags,
	.move = NULL,
	.verify_access = vmw_verify_access,
	.sync_obj_signaled = vmw_sync_obj_signaled,
	.sync_obj_wait = vmw_sync_obj_wait,
	.sync_obj_flush = vmw_sync_obj_flush,
	.sync_obj_unref = vmw_sync_obj_unref,
	.sync_obj_ref = vmw_sync_obj_ref,
	.move_notify = NULL,
	.swap_notify = NULL,
	.fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
	.io_mem_reserve = &vmw_ttm_io_mem_reserve,
	.io_mem_free = &vmw_ttm_io_mem_free,
};