drm/nv50: implement BAR1/BAR3 management on top of new VM code
drivers/gpu/drm/nouveau/nouveau_bo.c

/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs   <darktama@iinet.net.au>
 *	    Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include "drmP.h"

#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_mm.h"
#include "nouveau_vm.h"

#include <linux/log2.h>
#include <linux/slab.h>

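/* TTM destroy callback, invoked when the last reference to the buffer
 * object goes away.  Releases any tile region the bo still holds and
 * frees the nouveau_bo wrapper.
 */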
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (unlikely(nvbo->gem))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);

	nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
	kfree(nvbo);
}

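/* Round the requested size and alignment up to what the tiling
 * hardware needs: pre-NV50 tiled surfaces want chipset-dependent
 * alignment, NV50 buffers are managed in 64KiB units.
 */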
static void
nouveau_bo_fixup_align(struct drm_device *dev,
		       uint32_t tile_mode, uint32_t tile_flags,
		       int *align, int *size)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (dev_priv->card_type < NV_50) {
		if (tile_mode) {
			if (dev_priv->chipset >= 0x40) {
				*align = 65536;
				*size = roundup(*size, 64 * tile_mode);

			} else if (dev_priv->chipset >= 0x30) {
				*align = 32768;
				*size = roundup(*size, 64 * tile_mode);

			} else if (dev_priv->chipset >= 0x20) {
				*align = 16384;
				*size = roundup(*size, 64 * tile_mode);

			} else if (dev_priv->chipset >= 0x10) {
				*align = 16384;
				*size = roundup(*size, 32 * tile_mode);
			}
		}
	}

	/* ALIGN works only on powers of two. */
	*size = roundup(*size, PAGE_SIZE);
	if (dev_priv->card_type == NV_50) {
		*size = roundup(*size, 65536);
		*align = max(65536, *align);
	}
}

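/* Create a new buffer object.  Tiling state is recorded before the
 * size/alignment fixup, then the object is handed to ttm_bo_init(),
 * which owns cleanup (via nouveau_bo_del_ttm) from there on.
 */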
int
nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
	       int size, int align, uint32_t flags, uint32_t tile_mode,
	       uint32_t tile_flags, bool no_vm, bool mappable,
	       struct nouveau_bo **pnvbo)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *nvbo;
	int ret = 0;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	nvbo->mappable = mappable;
	nvbo->no_vm = no_vm;
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;
	nvbo->bo.bdev = &dev_priv->ttm.bdev;

	nouveau_bo_fixup_align(dev, tile_mode, nouveau_bo_tile_layout(nvbo),
			       &align, &size);
	align >>= PAGE_SHIFT;

	nouveau_bo_placement_set(nvbo, flags, 0);

	nvbo->channel = chan;
	ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
			  ttm_bo_type_device, &nvbo->placement, align, 0,
			  false, NULL, size, nouveau_bo_del_ttm);
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}
	nvbo->channel = NULL;

	*pnvbo = nvbo;
	return 0;
}

static void
set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
{
	*n = 0;

	if (type & TTM_PL_FLAG_VRAM)
		pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
	if (type & TTM_PL_FLAG_TT)
		pl[(*n)++] = TTM_PL_FLAG_TT | flags;
	if (type & TTM_PL_FLAG_SYSTEM)
		pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
}

static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);

	if (dev_priv->card_type == NV_10 &&
	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM)) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units. Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 * at the same time.
		 */
		int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;

		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
			nvbo->placement.fpfn = vram_pages / 2;
			nvbo->placement.lpfn = ~0;
		} else {
			nvbo->placement.fpfn = 0;
			nvbo->placement.lpfn = vram_pages / 2;
		}
	}
}

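/* Rebuild the TTM placement lists for this bo.  "busy" adds extra
 * domains TTM may use when the preferred ones are under pressure, and
 * pinned buffers additionally get TTM_PL_FLAG_NO_EVICT.
 */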
void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = TTM_PL_MASK_CACHING |
		(nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement,
			   type, flags);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   type | busy, flags);

	set_placement_range(nvbo, type);
}

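/* Pin a buffer into the memory types given by memtype.  Pins are
 * refcounted; only the first one actually validates the bo into place
 * and updates the aperture accounting.
 */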
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
		NV_ERROR(nouveau_bdev(bo->bdev)->dev,
			 "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
			 1 << bo->mem.mem_type, memtype);
		return -EINVAL;
	}

	if (nvbo->pin_refcnt++)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		goto out;

	nouveau_bo_placement_set(nvbo, memtype, 0);

	ret = nouveau_bo_validate(nvbo, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free -= bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free -= bo->mem.size;
			break;
		default:
			break;
		}
	}
	ttm_bo_unreserve(bo);
out:
	if (unlikely(ret))
		nvbo->pin_refcnt--;
	return ret;
}

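/* Drop one pin reference; the final unpin revalidates the bo without
 * TTM_PL_FLAG_NO_EVICT and returns its size to the aperture counters.
 */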
int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (--nvbo->pin_refcnt)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		return ret;

	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = nouveau_bo_validate(nvbo, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free += bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free += bo->mem.size;
			break;
		default:
			break;
		}
	}

	ttm_bo_unreserve(bo);
	return ret;
}

int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	if (nvbo)
		ttm_bo_kunmap(&nvbo->kmap);
}

int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
		    bool no_wait_reserve, bool no_wait_gpu)
{
	int ret;

	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible,
			      no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	return 0;
}

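/* Kmap accessors.  "index" counts in units of the access size, not
 * bytes, and the bo must already be mapped via nouveau_bo_map(); I/O
 * memory is dereferenced through the io*_native helpers.
 */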
u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread16_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

static struct ttm_backend *
nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (dev_priv->gart_info.type) {
#if __OS_HAS_AGP
	case NOUVEAU_GART_AGP:
		return ttm_agp_backend_init(bdev, dev->agp->bridge);
#endif
	case NOUVEAU_GART_SGDMA:
		return nouveau_sgdma_init_ttm(dev);
	default:
		NV_ERROR(dev, "Unknown GART type %d\n",
			 dev_priv->gart_info.type);
		break;
	}

	return NULL;
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}

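/* Describe each memory type to TTM.  On NV50 VRAM is handled by
 * nouveau's own manager and becomes CPU-visible only through BAR1
 * mappings set up on demand, hence the io-reserve LRU and the
 * disabled fastpath.
 */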
static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		if (dev_priv->card_type == NV_50) {
			man->func = &nouveau_vram_manager;
			man->io_reserve_fastpath = false;
			man->use_io_reserve_lru = true;
		} else {
			man->func = &ttm_bo_manager_func;
		}
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		if (dev_priv->card_type == NV_50)
			man->gpu_offset = 0x40000000;
		else
			man->gpu_offset = 0;
		break;
	case TTM_PL_TT:
		man->func = &ttm_bo_manager_func;
		switch (dev_priv->gart_info.type) {
		case NOUVEAU_GART_AGP:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
			break;
		case NOUVEAU_GART_SGDMA:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
			break;
		default:
			NV_ERROR(dev, "Unknown GART type: %d\n",
				 dev_priv->gart_info.type);
			return -EINVAL;
		}
		man->gpu_offset = dev_priv->vm_gart_base;
		break;
	default:
		NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
					 TTM_PL_FLAG_SYSTEM);
		break;
	default:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
		break;
	}

	*pl = nvbo->placement;
}

/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */

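/* Emit a fence on the channel that performed the copy, and let TTM
 * release the old backing store once that fence has signalled.
 */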
static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
			      struct nouveau_bo *nvbo, bool evict,
			      bool no_wait_reserve, bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct nouveau_fence *fence = NULL;
	int ret;

	ret = nouveau_fence_new(chan, &fence, true);
	if (ret)
		return ret;

	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
					no_wait_reserve, no_wait_gpu, new_mem);
	nouveau_fence_unref(&fence);
	return ret;
}

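/* Pick the DMA object handle the copy should use for a memory type:
 * the channel's VM-backed handles normally, or the global NvDma*
 * ctxdmas for no_vm buffers.
 */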
static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (nvbo->no_vm) {
		if (mem->mem_type == TTM_PL_TT)
			return NvDmaGART;
		return NvDmaVRAM;
	}

	if (mem->mem_type == TTM_PL_TT)
		return chan->gart_handle;
	return chan->vram_handle;
}

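/* NV50 M2MF copy.  Source/destination are virtual addresses in the
 * channel's VM (unless the bo is no_vm), the transfer is sliced into
 * 4MiB chunks, and the linear/tiled layout of each side is programmed
 * per chunk.
 */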
static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 length = (new_mem->num_pages << PAGE_SHIFT);
	u64 src_offset, dst_offset;
	int ret;

	src_offset = old_mem->start << PAGE_SHIFT;
	dst_offset = new_mem->start << PAGE_SHIFT;
	if (!nvbo->no_vm) {
		if (old_mem->mem_type == TTM_PL_VRAM)
			src_offset += dev_priv->vm_vram_base;
		else
			src_offset += dev_priv->vm_gart_base;

		if (new_mem->mem_type == TTM_PL_VRAM)
			dst_offset += dev_priv->vm_vram_base;
		else
			dst_offset += dev_priv->vm_gart_base;
	}

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;

	BEGIN_RING(chan, NvSubM2MF, 0x0184, 2);
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

	while (length) {
		u32 amount, stride, height;

		amount  = min(length, (u64)(4 * 1024 * 1024));
		stride  = 16 * 4;
		height  = amount / stride;

		if (new_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x0200, 7);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, stride);
			OUT_RING  (chan, height);
			OUT_RING  (chan, 1);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
			OUT_RING  (chan, 1);
		}
		if (old_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x021c, 7);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, stride);
			OUT_RING  (chan, height);
			OUT_RING  (chan, 1);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
			OUT_RING  (chan, 1);
		}

		ret = RING_SPACE(chan, 14);
		if (ret)
			return ret;

		BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		BEGIN_RING(chan, NvSubM2MF, 0x030c, 8);
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, stride);
		OUT_RING  (chan, stride);
		OUT_RING  (chan, stride);
		OUT_RING  (chan, height);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		length -= amount;
		src_offset += amount;
		dst_offset += amount;
	}

	return 0;
}

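/* Pre-NV50 M2MF copy.  Offsets are linear aperture addresses, copied
 * one page per line, at most 2047 lines per request.
 */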
static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	u32 src_offset = old_mem->start << PAGE_SHIFT;
	u32 dst_offset = new_mem->start << PAGE_SHIFT;
	u32 page_count = new_mem->num_pages;
	int ret;

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;

	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_RING(chan, NvSubM2MF,
				 NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING  (chan, src_offset);
		OUT_RING  (chan, dst_offset);
		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* line_length */
		OUT_RING  (chan, line_count);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

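/* Run the copy on the bo's own channel when it has one, otherwise on
 * the kernel channel under its mutex, then fence the move.
 */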
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     bool no_wait_reserve, bool no_wait_gpu,
		     struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_channel *chan;
	int ret;

	chan = nvbo->channel;
	if (!chan || nvbo->no_vm) {
		chan = dev_priv->channel;
		mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
	}

	if (dev_priv->card_type < NV_50)
		ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	else
		ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	if (ret == 0) {
		ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
						    no_wait_reserve,
						    no_wait_gpu, new_mem);
	}

	if (chan == dev_priv->channel)
		mutex_unlock(&chan->mutex);
	return ret;
}

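/* VRAM -> SYSTEM move.  The GPU can't write to unbound system pages,
 * so the copy is staged through a GART placement and TTM then flips
 * the bo to its final system placement.
 */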
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

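/* SYSTEM -> VRAM move, the mirror of nouveau_bo_move_flipd(): bind
 * the pages into the GART first so the GPU can copy them into VRAM.
 */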
static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	if (ret)
		goto out;

out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

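/* Set up tiling state for a bo moving into VRAM: a linear VM binding
 * on NV50, or a tile region on NV10-NV4x.  No-op for no_vm buffers
 * and non-VRAM destinations.
 */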
static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
		   struct nouveau_tile_reg **new_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	uint64_t offset;
	int ret;

	if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
		/* Nothing to do. */
		*new_tile = NULL;
		return 0;
	}

	offset = new_mem->start << PAGE_SHIFT;

	if (dev_priv->card_type == NV_50) {
		ret = nv50_mem_vm_bind_linear(dev,
					      offset + dev_priv->vm_vram_base,
					      new_mem->size,
					      nouveau_bo_tile_layout(nvbo),
					      offset);
		if (ret)
			return ret;

	} else if (dev_priv->card_type >= NV_10) {
		*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
						nvbo->tile_mode,
						nvbo->tile_flags);
	}

	return 0;
}

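/* Retire the bo's previous tile region once the move is fenced and
 * remember the new one (pre-NV50 chipsets only).
 */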
static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_tile_reg *new_tile,
		      struct nouveau_tile_reg **old_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;

	if (dev_priv->card_type >= NV_10 &&
	    dev_priv->card_type < NV_50) {
		nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
		*old_tile = new_tile;
	}
}

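/* Top-level TTM move callback.  Binds tiling/VM state for the target
 * placement, then tries a no-op "fake" move, a GPU copy, and finally
 * a CPU memcpy, in that order.
 */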
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait_reserve, bool no_wait_gpu,
		struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_tile_reg *new_tile = NULL;
	int ret = 0;

	ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
	if (ret)
		return ret;

	/* Fake bo copy. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_mem;
		new_mem->mm_node = NULL;
		goto out;
	}

	/* Software copy if the card isn't up and running yet. */
	if (!dev_priv->channel) {
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
		goto out;
	}

	/* Hardware assisted copy. */
	if (new_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	else if (old_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	else
		ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);

	if (!ret)
		goto out;

	/* Fallback to software copy. */
	ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);

out:
	if (ret)
		nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
	else
		nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);

	return ret;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}

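/* Reserve an IO-mappable window for "mem".  With the new VM code,
 * NV50 VRAM is no longer directly CPU-visible: a virtual range is
 * allocated out of the BAR1 VM and mapped on demand, and the bus
 * address handed back is BAR1's PCI base plus the vma offset (less
 * the 0x0020000000 base the BAR1 VM addresses start from).
 */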
static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;
	int ret;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		return 0;
	case TTM_PL_TT:
#if __OS_HAS_AGP
		if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = dev_priv->gart_info.aper_base;
			mem->bus.is_iomem = true;
		}
#endif
		break;
	case TTM_PL_VRAM:
	{
		struct nouveau_vram *vram = mem->mm_node;

		if (!dev_priv->bar1_vm) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = pci_resource_start(dev->pdev, 1);
			mem->bus.is_iomem = true;
			break;
		}

		ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size, 12,
				     NV_MEM_ACCESS_RW, &vram->bar_vma);
		if (ret)
			return ret;

		nouveau_vm_map(&vram->bar_vma, vram);

		mem->bus.offset = vram->bar_vma.offset;
		mem->bus.offset -= 0x0020000000ULL;
		mem->bus.base = pci_resource_start(dev->pdev, 1);
		mem->bus.is_iomem = true;
	}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

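/* Undo nouveau_ttm_io_mem_reserve(): unmap and release the BAR1
 * virtual range, if one was allocated for this node.
 */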
static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct nouveau_vram *vram = mem->mm_node;

	if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM)
		return;

	if (!vram->bar_vma.node)
		return;

	nouveau_vm_unmap(&vram->bar_vma);
	nouveau_vm_put(&vram->bar_vma);
}

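/* Called before a bo is CPU-mapped through a fault.  Tiled or
 * VRAM-resident buffers must sit inside the CPU-mappable part of
 * VRAM, so migrate them into that window if they currently don't.
 */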
static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->mem.mem_type != TTM_PL_VRAM) {
		if (dev_priv->card_type < NV_50 ||
		    !nouveau_bo_tile_layout(nvbo))
			return 0;
	}

	/* make sure bo is in mappable vram */
	if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages)
		return 0;

	nvbo->placement.fpfn = 0;
	nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
	nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0);
	return nouveau_bo_validate(nvbo, false, true, false);
}

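/* Install a fence as the bo's sync object, taking a reference first
 * and dropping the reference on whatever fence it replaces.
 */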
void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
{
	struct nouveau_fence *old_fence;

	if (likely(fence))
		nouveau_fence_ref(fence);

	spin_lock(&nvbo->bo.bdev->fence_lock);
	old_fence = nvbo->bo.sync_obj;
	nvbo->bo.sync_obj = fence;
	spin_unlock(&nvbo->bo.bdev->fence_lock);

	nouveau_fence_unref(&old_fence);
}

struct ttm_bo_driver nouveau_bo_driver = {
	.create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.sync_obj_signaled = __nouveau_fence_signalled,
	.sync_obj_wait = __nouveau_fence_wait,
	.sync_obj_flush = __nouveau_fence_flush,
	.sync_obj_unref = __nouveau_fence_unref,
	.sync_obj_ref = __nouveau_fence_ref,
	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
};