/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs   <darktama@iinet.net.au>
 *	    Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include <core/engine.h>
#include <linux/swiotlb.h>

#include <subdev/fb.h>
#include <subdev/vm.h>
#include <subdev/bar.h>

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"

#include "nouveau_bo.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"

/*
 * NV10-NV40 tiling helpers
 */

static void
nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
			   u32 addr, u32 size, u32 pitch, u32 flags)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	int i = reg - drm->tile.reg;
	struct nouveau_fb *pfb = nouveau_fb(drm->device);
	struct nouveau_fb_tile *tile = &pfb->tile.region[i];
	struct nouveau_engine *engine;

	nouveau_fence_unref(&reg->fence);

	if (tile->pitch)
		pfb->tile.fini(pfb, i, tile);

	if (pitch)
		pfb->tile.init(pfb, i, addr, size, pitch, flags, tile);

	pfb->tile.prog(pfb, i, tile);

	if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_GR)))
		engine->tile_prog(engine, i);
	if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_MPEG)))
		engine->tile_prog(engine, i);
}

static struct nouveau_drm_tile *
nv10_bo_get_tile_region(struct drm_device *dev, int i)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_drm_tile *tile = &drm->tile.reg[i];

	spin_lock(&drm->tile.lock);

	if (!tile->used &&
	    (!tile->fence || nouveau_fence_done(tile->fence)))
		tile->used = true;
	else
		tile = NULL;

	spin_unlock(&drm->tile.lock);
	return tile;
}

static void
nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
			struct nouveau_fence *fence)
{
	struct nouveau_drm *drm = nouveau_drm(dev);

	if (tile) {
		spin_lock(&drm->tile.lock);
		if (fence) {
			/* Mark it as pending. */
			tile->fence = fence;
			nouveau_fence_ref(fence);
		}

		tile->used = false;
		spin_unlock(&drm->tile.lock);
	}
}

static struct nouveau_drm_tile *
nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
		   u32 size, u32 pitch, u32 flags)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_fb *pfb = nouveau_fb(drm->device);
	struct nouveau_drm_tile *tile, *found = NULL;
	int i;

	for (i = 0; i < pfb->tile.regions; i++) {
		tile = nv10_bo_get_tile_region(dev, i);

		if (pitch && !found) {
			found = tile;
			continue;

		} else if (tile && pfb->tile.region[i].pitch) {
			/* Kill an unused tile region. */
			nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
		}

		nv10_bo_put_tile_region(dev, tile, NULL);
	}

	if (found)
		nv10_bo_update_tile_region(dev, found, addr, size,
					   pitch, flags);
	return found;
}
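
/*
 * Illustrative sketch, not part of this file: the helpers above are
 * consumed by the bo move path further down (nouveau_bo_vm_bind() /
 * nouveau_bo_vm_cleanup()).  The pattern is roughly:
 *
 *	struct nouveau_drm_tile *tile;
 *
 *	tile = nv10_bo_set_tiling(dev, addr, size, pitch, flags);
 *	... GPU renders to the tiled region ...
 *	nv10_bo_put_tile_region(dev, tile, fence);
 *
 * where "fence" is whatever fence last touched the buffer, so the
 * region is only recycled once the hardware is done with it.
 */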

static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (unlikely(nvbo->gem.filp))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);
	WARN_ON(nvbo->pin_refcnt > 0);
	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
	kfree(nvbo);
}

static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
		       int *align, int *size)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nouveau_device *device = nv_device(drm->device);

	if (device->card_type < NV_50) {
		if (nvbo->tile_mode) {
			if (device->chipset >= 0x40) {
				*align = 65536;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (device->chipset >= 0x30) {
				*align = 32768;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (device->chipset >= 0x20) {
				*align = 16384;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (device->chipset >= 0x10) {
				*align = 16384;
				*size = roundup(*size, 32 * nvbo->tile_mode);
			}
		}
	} else {
		*size = roundup(*size, (1 << nvbo->page_shift));
		*align = max((1 << nvbo->page_shift), *align);
	}

	*size = roundup(*size, PAGE_SIZE);
}

int
nouveau_bo_new(struct drm_device *dev, int size, int align,
	       uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
	       struct sg_table *sg,
	       struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_bo *nvbo;
	size_t acc_size;
	int ret;
	int type = ttm_bo_type_device;
	int lpg_shift = 12;
	int max_size;

	if (drm->client.base.vm)
		lpg_shift = drm->client.base.vm->vmm->lpg_shift;
	max_size = INT_MAX & ~((1 << lpg_shift) - 1);

	if (size <= 0 || size > max_size) {
		nv_warn(drm, "skipped size %x\n", (u32)size);
		return -EINVAL;
	}

	if (sg)
		type = ttm_bo_type_sg;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	INIT_LIST_HEAD(&nvbo->vma_list);
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;
	nvbo->bo.bdev = &drm->ttm.bdev;

	nvbo->page_shift = 12;
	if (drm->client.base.vm) {
		if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
			nvbo->page_shift = drm->client.base.vm->vmm->lpg_shift;
	}

	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
	nouveau_bo_placement_set(nvbo, flags, 0);

	acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size,
				       sizeof(struct nouveau_bo));

	ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
			  type, &nvbo->placement,
			  align >> PAGE_SHIFT, false, NULL, acc_size, sg,
			  nouveau_bo_del_ttm);
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}

	*pnvbo = nvbo;
	return 0;
}
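
/*
 * Illustrative sketch, not part of this file: a typical caller
 * allocating a page-aligned, linear (tile_mode/tile_flags == 0)
 * 64KiB buffer in VRAM.  Error handling elided.
 *
 *	struct nouveau_bo *nvbo = NULL;
 *	int ret;
 *
 *	ret = nouveau_bo_new(dev, 64 * 1024, 0, TTM_PL_FLAG_VRAM,
 *			     0, 0, NULL, &nvbo);
 */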

static void
set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
{
	*n = 0;

	if (type & TTM_PL_FLAG_VRAM)
		pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
	if (type & TTM_PL_FLAG_TT)
		pl[(*n)++] = TTM_PL_FLAG_TT | flags;
	if (type & TTM_PL_FLAG_SYSTEM)
		pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
}

static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nouveau_fb *pfb = nouveau_fb(drm->device);
	u32 vram_pages = pfb->ram->size >> PAGE_SHIFT;

	if (nv_device(drm->device)->card_type == NV_10 &&
	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
	    nvbo->bo.mem.num_pages < vram_pages / 4) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units.  Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 * at the same time.
		 */
		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
			nvbo->placement.fpfn = vram_pages / 2;
			nvbo->placement.lpfn = ~0;
		} else {
			nvbo->placement.fpfn = 0;
			nvbo->placement.lpfn = vram_pages / 2;
		}
	}
}

void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = TTM_PL_MASK_CACHING |
		(nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement,
			   type, flags);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   type | busy, flags);

	set_placement_range(nvbo, type);
}

int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		return ret;	/* don't unreserve a bo we never reserved */

	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
		NV_ERROR(drm, "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
			 1 << bo->mem.mem_type, memtype);
		ret = -EINVAL;
		goto out;
	}

	if (nvbo->pin_refcnt++)
		goto out;

	nouveau_bo_placement_set(nvbo, memtype, 0);

	ret = nouveau_bo_validate(nvbo, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			drm->gem.vram_available -= bo->mem.size;
			break;
		case TTM_PL_TT:
			drm->gem.gart_available -= bo->mem.size;
			break;
		default:
			break;
		}
	}
out:
	ttm_bo_unreserve(bo);
	return ret;
}

int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret, ref;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		return ret;

	ref = --nvbo->pin_refcnt;
	WARN_ON_ONCE(ref < 0);
	if (ref)
		goto out;

	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = nouveau_bo_validate(nvbo, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			drm->gem.vram_available += bo->mem.size;
			break;
		case TTM_PL_TT:
			drm->gem.gart_available += bo->mem.size;
			break;
		default:
			break;
		}
	}

out:
	ttm_bo_unreserve(bo);
	return ret;
}
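
/*
 * Illustrative sketch, not part of this file: pin/unpin are refcounted,
 * so nested pins are cheap and only the outermost pair actually changes
 * placement.  A scanout buffer, for example:
 *
 *	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
 *	... buffer is guaranteed not to be evicted while displayed ...
 *	nouveau_bo_unpin(nvbo);
 */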

int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	if (nvbo)
		ttm_bo_kunmap(&nvbo->kmap);
}

int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
		    bool no_wait_gpu)
{
	int ret;

	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
			      interruptible, no_wait_gpu);
	if (ret)
		return ret;

	return 0;
}

u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread16_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}
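
/*
 * Illustrative sketch, not part of this file: CPU access through the
 * accessors above.  Note "index" counts elements of the access size
 * (u32 words here), not bytes.
 *
 *	ret = nouveau_bo_map(nvbo);
 *	if (ret == 0) {
 *		nouveau_bo_wr32(nvbo, 0, 0xdeadbeef);
 *		val = nouveau_bo_rd32(nvbo, 0);
 *		nouveau_bo_unmap(nvbo);
 *	}
 */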

static struct ttm_tt *
nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
		      uint32_t page_flags, struct page *dummy_read)
{
#if __OS_HAS_AGP
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct drm_device *dev = drm->dev;

	if (drm->agp.stat == ENABLED) {
		return ttm_agp_tt_create(bdev, dev->agp->bridge, size,
					 page_flags, dummy_read);
	}
#endif

	return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read);
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}

static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		if (nv_device(drm->device)->card_type >= NV_50) {
			man->func = &nouveau_vram_manager;
			man->io_reserve_fastpath = false;
			man->use_io_reserve_lru = true;
		} else {
			man->func = &ttm_bo_manager_func;
		}
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	case TTM_PL_TT:
		if (nv_device(drm->device)->card_type >= NV_50)
			man->func = &nouveau_gart_manager;
		else
		if (drm->agp.stat != ENABLED)
			man->func = &nv04_gart_manager;
		else
			man->func = &ttm_bo_manager_func;

		if (drm->agp.stat == ENABLED) {
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
		} else {
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
		}

		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
					 TTM_PL_FLAG_SYSTEM);
		break;
	default:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
		break;
	}

	*pl = nvbo->placement;
}


/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */

static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
			      struct nouveau_bo *nvbo, bool evict,
			      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct nouveau_fence *fence = NULL;
	int ret;

	ret = nouveau_fence_new(chan, false, &fence);
	if (ret)
		return ret;

	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, evict,
					no_wait_gpu, new_mem);
	nouveau_fence_unref(&fence);
	return ret;
}

static int
nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 2);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle & 0x0000ffff);
		FIRE_RING (chan);
	}
	return ret;
}

static int
nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 10);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, new_mem->num_pages);
		BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
	}
	return ret;
}

static int
nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 2);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle);
	}
	return ret;
}

static int
nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	while (page_count) {
		int line_count = (page_count > 8191) ? 8191 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, line_count);
		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
		OUT_RING  (chan, 0x00000110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 12);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
		OUT_RING  (chan, upper_32_bits(dst_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* line_length */
		OUT_RING  (chan, line_count);
		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
		OUT_RING  (chan, 0x00100110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	while (page_count) {
		int line_count = (page_count > 8191) ? 8191 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, line_count);
		BEGIN_NV04(chan, NvSubCopy, 0x0300, 1);
		OUT_RING  (chan, 0x00000110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING  (chan, 0x00000000 /* COPY */);
		OUT_RING  (chan, new_mem->num_pages << PAGE_SHIFT);
	}
	return ret;
}

static int
nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
		OUT_RING  (chan, new_mem->num_pages << PAGE_SHIFT);
		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING  (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
	}
	return ret;
}

static int
nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 6);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle);
		BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
		OUT_RING  (chan, NvNotify0);
		OUT_RING  (chan, NvDmaFB);
		OUT_RING  (chan, NvDmaFB);
	}

	return ret;
}

static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 length = (new_mem->num_pages << PAGE_SHIFT);
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	int ret;

	while (length) {
		u32 amount, stride, height;

		amount  = min(length, (u64)(4 * 1024 * 1024));
		stride  = 16 * 4;
		height  = amount / stride;

		if (old_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;

			BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, stride);
			OUT_RING  (chan, height);
			OUT_RING  (chan, 1);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;

			BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
			OUT_RING  (chan, 1);
		}
		if (new_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;

			BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, stride);
			OUT_RING  (chan, height);
			OUT_RING  (chan, 1);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;

			BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
			OUT_RING  (chan, 1);
		}

		ret = RING_SPACE(chan, 14);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, stride);
		OUT_RING  (chan, stride);
		OUT_RING  (chan, stride);
		OUT_RING  (chan, height);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		length -= amount;
		src_offset += amount;
		dst_offset += amount;
	}

	return 0;
}

static int
nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 4);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle);
		BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
		OUT_RING  (chan, NvNotify0);
	}

	return ret;
}

static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
	if (mem->mem_type == TTM_PL_TT)
		return NvDmaTT;
	return NvDmaFB;
}

static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	u32 src_offset = old_mem->start << PAGE_SHIFT;
	u32 dst_offset = new_mem->start << PAGE_SHIFT;
	u32 page_count = new_mem->num_pages;
	int ret;

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;

	BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy,
			   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING  (chan, src_offset);
		OUT_RING  (chan, dst_offset);
		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* line_length */
		OUT_RING  (chan, line_count);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
		   struct ttm_mem_reg *mem, struct nouveau_vma *vma)
{
	struct nouveau_mem *node = mem->mm_node;
	int ret;

	ret = nouveau_vm_get(nv_client(chan->cli)->vm, mem->num_pages <<
			     PAGE_SHIFT, node->page_shift,
			     NV_MEM_ACCESS_RW, vma);
	if (ret)
		return ret;

	if (mem->mem_type == TTM_PL_VRAM)
		nouveau_vm_map(vma, node);
	else
		nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT, node);

	return 0;
}

static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_channel *chan = drm->ttm.chan;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	mutex_lock_nested(&chan->cli->mutex, SINGLE_DEPTH_NESTING);

	/* create temporary vmas for the transfer and attach them to the
	 * old nouveau_mem node, these will get cleaned up after ttm has
	 * destroyed the ttm_mem_reg
	 */
	if (nv_device(drm->device)->card_type >= NV_50) {
		struct nouveau_mem *node = old_mem->mm_node;

		ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]);
		if (ret)
			goto out;

		ret = nouveau_vma_getmap(chan, nvbo, new_mem, &node->vma[1]);
		if (ret)
			goto out;
	}

	ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
	if (ret == 0) {
		ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
						    no_wait_gpu, new_mem);
	}

out:
	mutex_unlock(&chan->cli->mutex);
	return ret;
}

void
nouveau_bo_move_init(struct nouveau_drm *drm)
{
	static const struct {
		const char *name;
		int engine;
		u32 oclass;
		int (*exec)(struct nouveau_channel *,
			    struct ttm_buffer_object *,
			    struct ttm_mem_reg *, struct ttm_mem_reg *);
		int (*init)(struct nouveau_channel *, u32 handle);
	} _methods[] = {
		{  "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
		{ "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
		{  "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
		{  "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
		{  "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
		{},
		/* entries below the terminator are never reached by the
		 * loop's (++mthd)->exec test; the NV98 CRYPT method is
		 * kept here but deliberately disabled.
		 */
		{ "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
	}, *mthd = _methods;
	const char *name = "CPU";
	int ret;

	do {
		struct nouveau_object *object;
		struct nouveau_channel *chan;
		u32 handle = (mthd->engine << 16) | mthd->oclass;

		if (mthd->engine)
			chan = drm->cechan;
		else
			chan = drm->channel;
		if (chan == NULL)
			continue;

		ret = nouveau_object_new(nv_object(drm), chan->handle, handle,
					 mthd->oclass, NULL, 0, &object);
		if (ret == 0) {
			ret = mthd->init(chan, handle);
			if (ret) {
				nouveau_object_del(nv_object(drm),
						   chan->handle, handle);
				continue;
			}

			drm->ttm.move = mthd->exec;
			drm->ttm.chan = chan;
			name = mthd->name;
			break;
		}
	} while ((++mthd)->exec);

	NV_INFO(drm, "MM: using %s for buffer copies\n", name);
}
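
/*
 * Illustrative sketch, not part of this file: the driver calls this
 * once at load time, after its channels exist; every accelerated bo
 * move afterwards goes through drm->ttm.move on drm->ttm.chan, and the
 * chosen method is reported via the NV_INFO() above, e.g.
 * "MM: using COPY for buffer copies".
 *
 *	nouveau_bo_move_init(drm);
 */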

static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_mem);
	if (ret)
		goto out;

out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_vma *vma;

	/* ttm can now (stupidly) pass the driver bos it didn't create... */
	if (bo->destroy != nouveau_bo_del_ttm)
		return;

	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (new_mem && new_mem->mem_type == TTM_PL_VRAM) {
			nouveau_vm_map(vma, new_mem->mm_node);
		} else
		if (new_mem && new_mem->mem_type == TTM_PL_TT &&
		    nvbo->page_shift == vma->vm->vmm->spg_shift) {
			if (((struct nouveau_mem *)new_mem->mm_node)->sg)
				nouveau_vm_map_sg_table(vma, 0,
					new_mem->num_pages << PAGE_SHIFT,
					new_mem->mm_node);
			else
				nouveau_vm_map_sg(vma, 0,
					new_mem->num_pages << PAGE_SHIFT,
					new_mem->mm_node);
		} else {
			nouveau_vm_unmap(vma);
		}
	}
}

static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
		   struct nouveau_drm_tile **new_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 offset = new_mem->start << PAGE_SHIFT;

	*new_tile = NULL;
	if (new_mem->mem_type != TTM_PL_VRAM)
		return 0;

	if (nv_device(drm->device)->card_type >= NV_10) {
		*new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size,
					       nvbo->tile_mode,
					       nvbo->tile_flags);
	}

	return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_drm_tile *new_tile,
		      struct nouveau_drm_tile **old_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;

	nv10_bo_put_tile_region(dev, *old_tile, bo->sync_obj);
	*old_tile = new_tile;
}

static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_drm_tile *new_tile = NULL;
	int ret = 0;

	if (nv_device(drm->device)->card_type < NV_50) {
		ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
		if (ret)
			return ret;
	}

	/* Fake bo copy. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_mem;
		new_mem->mm_node = NULL;
		goto out;
	}

	/* CPU copy if we have no accelerated method available */
	if (!drm->ttm.move) {
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
		goto out;
	}

	/* Hardware assisted copy. */
	if (new_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flipd(bo, evict, intr,
					    no_wait_gpu, new_mem);
	else if (old_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flips(bo, evict, intr,
					    no_wait_gpu, new_mem);
	else
		ret = nouveau_bo_move_m2mf(bo, evict, intr,
					   no_wait_gpu, new_mem);

	if (!ret)
		goto out;

	/* Fallback to software copy. */
	ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);

out:
	if (nv_device(drm->device)->card_type < NV_50) {
		if (ret)
			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
		else
			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
	}

	return ret;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	return drm_vma_node_verify_access(&nvbo->gem.vma_node, filp);
}

static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct drm_device *dev = drm->dev;
	int ret;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		return 0;
	case TTM_PL_TT:
#if __OS_HAS_AGP
		if (drm->agp.stat == ENABLED) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = drm->agp.base;
			mem->bus.is_iomem = !dev->agp->cant_use_aperture;
		}
#endif
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = pci_resource_start(dev->pdev, 1);
		mem->bus.is_iomem = true;
		if (nv_device(drm->device)->card_type >= NV_50) {
			struct nouveau_bar *bar = nouveau_bar(drm->device);
			struct nouveau_mem *node = mem->mm_node;

			ret = bar->umap(bar, node, NV_MEM_ACCESS_RW,
					&node->bar_vma);
			if (ret)
				return ret;

			mem->bus.offset = node->bar_vma.offset;
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nouveau_bar *bar = nouveau_bar(drm->device);
	struct nouveau_mem *node = mem->mm_node;

	if (!node->bar_vma.node)
		return;

	bar->unmap(bar, &node->bar_vma);
}

static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_device *device = nv_device(drm->device);
	u32 mappable = pci_resource_len(device->pdev, 1) >> PAGE_SHIFT;

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->mem.mem_type != TTM_PL_VRAM) {
		if (nv_device(drm->device)->card_type < NV_50 ||
		    !nouveau_bo_tile_layout(nvbo))
			return 0;
	}

	/* make sure bo is in mappable vram */
	if (bo->mem.start + bo->mem.num_pages < mappable)
		return 0;

	nvbo->placement.fpfn = 0;
	nvbo->placement.lpfn = mappable;
	nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
	return nouveau_bo_validate(nvbo, false, false);
}

static int
nouveau_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct drm_device *dev;
	unsigned i;
	int r;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (ttm->state != tt_unpopulated)
		return 0;

	if (slave && ttm->sg) {
		/* make userspace faulting work */
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 ttm_dma->dma_address, ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}

	drm = nouveau_bdev(ttm->bdev);
	dev = drm->dev;

#if __OS_HAS_AGP
	if (drm->agp.stat == ENABLED) {
		return ttm_agp_tt_populate(ttm);
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate((void *)ttm, dev->dev);
	}
#endif

	r = ttm_pool_populate(ttm);
	if (r) {
		return r;
	}

	for (i = 0; i < ttm->num_pages; i++) {
		ttm_dma->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
						       0, PAGE_SIZE,
						       PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(dev->pdev, ttm_dma->dma_address[i])) {
			/* unwind every mapping made so far, including
			 * page 0
			 */
			while (i--) {
				pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
				ttm_dma->dma_address[i] = 0;
			}
			ttm_pool_unpopulate(ttm);
			return -EFAULT;
		}
	}
	return 0;
}

static void
nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct drm_device *dev;
	unsigned i;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (slave)
		return;

	drm = nouveau_bdev(ttm->bdev);
	dev = drm->dev;

#if __OS_HAS_AGP
	if (drm->agp.stat == ENABLED) {
		ttm_agp_tt_unpopulate(ttm);
		return;
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate((void *)ttm, dev->dev);
		return;
	}
#endif

	for (i = 0; i < ttm->num_pages; i++) {
		if (ttm_dma->dma_address[i]) {
			pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		}
	}

	ttm_pool_unpopulate(ttm);
}

void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
{
	struct nouveau_fence *old_fence = NULL;

	if (likely(fence))
		nouveau_fence_ref(fence);

	spin_lock(&nvbo->bo.bdev->fence_lock);
	old_fence = nvbo->bo.sync_obj;
	nvbo->bo.sync_obj = fence;
	spin_unlock(&nvbo->bo.bdev->fence_lock);

	nouveau_fence_unref(&old_fence);
}

static void
nouveau_bo_fence_unref(void **sync_obj)
{
	nouveau_fence_unref((struct nouveau_fence **)sync_obj);
}

static void *
nouveau_bo_fence_ref(void *sync_obj)
{
	return nouveau_fence_ref(sync_obj);
}

static bool
nouveau_bo_fence_signalled(void *sync_obj)
{
	return nouveau_fence_done(sync_obj);
}

static int
nouveau_bo_fence_wait(void *sync_obj, bool lazy, bool intr)
{
	return nouveau_fence_wait(sync_obj, lazy, intr);
}

static int
nouveau_bo_fence_flush(void *sync_obj)
{
	return 0;
}

struct ttm_bo_driver nouveau_bo_driver = {
	.ttm_tt_create = &nouveau_ttm_tt_create,
	.ttm_tt_populate = &nouveau_ttm_tt_populate,
	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move_notify = nouveau_bo_move_ntfy,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.sync_obj_signaled = nouveau_bo_fence_signalled,
	.sync_obj_wait = nouveau_bo_fence_wait,
	.sync_obj_flush = nouveau_bo_fence_flush,
	.sync_obj_unref = nouveau_bo_fence_unref,
	.sync_obj_ref = nouveau_bo_fence_ref,
	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
};

struct nouveau_vma *
nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nouveau_vm *vm)
{
	struct nouveau_vma *vma;
	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (vma->vm == vm)
			return vma;
	}

	return NULL;
}

int
nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
		   struct nouveau_vma *vma)
{
	const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	struct nouveau_mem *node = nvbo->bo.mem.mm_node;
	int ret;

	ret = nouveau_vm_get(vm, size, nvbo->page_shift,
			     NV_MEM_ACCESS_RW, vma);
	if (ret)
		return ret;

	if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
		nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
	else if (nvbo->bo.mem.mem_type == TTM_PL_TT) {
		if (node->sg)
			nouveau_vm_map_sg_table(vma, 0, size, node);
		else
			nouveau_vm_map_sg(vma, 0, size, node);
	}

	list_add_tail(&vma->head, &nvbo->vma_list);
	vma->refcount = 1;
	return 0;
}

void
nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	if (vma->node) {
		if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM)
			nouveau_vm_unmap(vma);
		nouveau_vm_put(vma);
		list_del(&vma->head);
	}
}
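
/*
 * Illustrative sketch, not part of this file: per-VM mappings pair
 * nouveau_bo_vma_add() with nouveau_bo_vma_del(); the caller owns the
 * nouveau_vma storage (assumed kzalloc'd here).
 *
 *	struct nouveau_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
 *
 *	ret = nouveau_bo_vma_add(nvbo, client_vm, vma);
 *	... GPU virtual address available in vma->offset ...
 *	nouveau_bo_vma_del(nvbo, vma);
 *	kfree(vma);
 */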