/* drivers/gpu/drm/nouveau/nouveau_bo.c */
/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs <darktama@iinet.net.au>
 *	    Jeremy Kolb <jkolb@brandeis.edu>
 */

#include "drmP.h"
#include "ttm/ttm_page_alloc.h"

#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_mm.h"
#include "nouveau_vm.h"

#include <linux/log2.h>
#include <linux/slab.h>

static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (unlikely(nvbo->gem))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);

	nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
	kfree(nvbo);
}

static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
		       int *align, int *size)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);

	if (dev_priv->card_type < NV_50) {
		if (nvbo->tile_mode) {
			if (dev_priv->chipset >= 0x40) {
				*align = 65536;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (dev_priv->chipset >= 0x30) {
				*align = 32768;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (dev_priv->chipset >= 0x20) {
				*align = 16384;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (dev_priv->chipset >= 0x10) {
				*align = 16384;
				*size = roundup(*size, 32 * nvbo->tile_mode);
			}
		}
	} else {
		*size = roundup(*size, (1 << nvbo->page_shift));
		*align = max((1 << nvbo->page_shift), *align);
	}

	*size = roundup(*size, PAGE_SIZE);
}

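/*
 * Worked example (illustrative numbers, assuming a 4KiB PAGE_SIZE): on
 * a pre-NV50 card with chipset >= 0x40 and tile_mode 4, a request for
 * size 100000 becomes roundup(100000, 64 * 4) = 100096, and the final
 * roundup(100096, PAGE_SIZE) yields 102400 bytes with 64KiB alignment.
 */
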
int
nouveau_bo_new(struct drm_device *dev, int size, int align,
	       uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
	       struct nouveau_bo **pnvbo)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *nvbo;
	size_t acc_size;
	int ret;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	INIT_LIST_HEAD(&nvbo->vma_list);
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;
	nvbo->bo.bdev = &dev_priv->ttm.bdev;

	nvbo->page_shift = 12;
	if (dev_priv->bar1_vm) {
		if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
			nvbo->page_shift = dev_priv->bar1_vm->lpg_shift;
	}

	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
	nouveau_bo_placement_set(nvbo, flags, 0);

	acc_size = ttm_bo_dma_acc_size(&dev_priv->ttm.bdev, size,
				       sizeof(struct nouveau_bo));

	ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
			  ttm_bo_type_device, &nvbo->placement,
			  align >> PAGE_SHIFT, 0, false, NULL, acc_size,
			  nouveau_bo_del_ttm);
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}

	*pnvbo = nvbo;
	return 0;
}

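/*
 * Usage sketch (hypothetical caller, not from this file): allocating
 * and CPU-mapping a small GART-backed scratch buffer would look
 * roughly like this, assuming a valid struct drm_device *dev:
 *
 *	struct nouveau_bo *nvbo = NULL;
 *	int ret;
 *
 *	ret = nouveau_bo_new(dev, 4096, 0, TTM_PL_FLAG_TT, 0, 0, &nvbo);
 *	if (ret == 0)
 *		ret = nouveau_bo_map(nvbo);
 */
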
static void
set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
{
	*n = 0;

	if (type & TTM_PL_FLAG_VRAM)
		pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
	if (type & TTM_PL_FLAG_TT)
		pl[(*n)++] = TTM_PL_FLAG_TT | flags;
	if (type & TTM_PL_FLAG_SYSTEM)
		pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
}

static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;

	if (dev_priv->card_type == NV_10 &&
	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
	    nvbo->bo.mem.num_pages < vram_pages / 4) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units. Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 * at the same time.
		 */
		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
			nvbo->placement.fpfn = vram_pages / 2;
			nvbo->placement.lpfn = ~0;
		} else {
			nvbo->placement.fpfn = 0;
			nvbo->placement.lpfn = vram_pages / 2;
		}
	}
}

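/*
 * Illustrative example (assumed numbers: 64MiB of VRAM and 4KiB pages,
 * so vram_pages = 16384): a depth (ZETA) buffer is constrained to the
 * upper half, pages [8192, ~0], while color buffers are limited to
 * pages [0, 8192], keeping the two on separate halves of VRAM.
 */
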
void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = TTM_PL_MASK_CACHING |
		(nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement,
			   type, flags);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   type | busy, flags);

	set_placement_range(nvbo, type);
}

int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
		NV_ERROR(nouveau_bdev(bo->bdev)->dev,
			 "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
			 1 << bo->mem.mem_type, memtype);
		return -EINVAL;
	}

	if (nvbo->pin_refcnt++)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		goto out;

	nouveau_bo_placement_set(nvbo, memtype, 0);

	ret = nouveau_bo_validate(nvbo, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free -= bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free -= bo->mem.size;
			break;
		default:
			break;
		}
	}
	ttm_bo_unreserve(bo);
out:
	if (unlikely(ret))
		nvbo->pin_refcnt--;
	return ret;
}

int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (--nvbo->pin_refcnt)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		return ret;

	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = nouveau_bo_validate(nvbo, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free += bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free += bo->mem.size;
			break;
		default:
			break;
		}
	}

	ttm_bo_unreserve(bo);
	return ret;
}

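/*
 * Pinning sketch (hypothetical caller): pin_refcnt makes the pair
 * nestable, so every successful nouveau_bo_pin() must be balanced by
 * exactly one nouveau_bo_unpin():
 *
 *	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
 *	if (ret == 0) {
 *		... use the buffer at a stable VRAM address ...
 *		nouveau_bo_unpin(nvbo);
 *	}
 */
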
int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	if (nvbo)
		ttm_bo_kunmap(&nvbo->kmap);
}

int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
		    bool no_wait_reserve, bool no_wait_gpu)
{
	int ret;

	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible,
			      no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	return 0;
}

u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread16_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

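/*
 * Access note (editor's comment): the index parameter above is in
 * units of the access size, not bytes, so nouveau_bo_wr32(nvbo, 4, v)
 * writes byte offset 16 of a previously nouveau_bo_map()'d buffer and
 * transparently uses the io accessors when the kmap is I/O memory.
 */
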
static struct ttm_tt *
nouveau_ttm_tt_create(struct ttm_bo_device *bdev,
		      unsigned long size, uint32_t page_flags,
		      struct page *dummy_read_page)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (dev_priv->gart_info.type) {
#if __OS_HAS_AGP
	case NOUVEAU_GART_AGP:
		return ttm_agp_tt_create(bdev, dev->agp->bridge,
					 size, page_flags, dummy_read_page);
#endif
	case NOUVEAU_GART_PDMA:
	case NOUVEAU_GART_HW:
		return nouveau_sgdma_create_ttm(bdev, size, page_flags,
						dummy_read_page);
	default:
		NV_ERROR(dev, "Unknown GART type %d\n",
			 dev_priv->gart_info.type);
		break;
	}

	return NULL;
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}

static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		if (dev_priv->card_type >= NV_50) {
			man->func = &nouveau_vram_manager;
			man->io_reserve_fastpath = false;
			man->use_io_reserve_lru = true;
		} else {
			man->func = &ttm_bo_manager_func;
		}
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	case TTM_PL_TT:
		if (dev_priv->card_type >= NV_50)
			man->func = &nouveau_gart_manager;
		else
			man->func = &ttm_bo_manager_func;
		switch (dev_priv->gart_info.type) {
		case NOUVEAU_GART_AGP:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
			break;
		case NOUVEAU_GART_PDMA:
		case NOUVEAU_GART_HW:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
			break;
		default:
			NV_ERROR(dev, "Unknown GART type: %d\n",
				 dev_priv->gart_info.type);
			return -EINVAL;
		}
		break;
	default:
		NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
					 TTM_PL_FLAG_SYSTEM);
		break;
	default:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
		break;
	}

	*pl = nvbo->placement;
}

/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */

static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
			      struct nouveau_bo *nvbo, bool evict,
			      bool no_wait_reserve, bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct nouveau_fence *fence = NULL;
	int ret;

	ret = nouveau_fence_new(chan, &fence, true);
	if (ret)
		return ret;

	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
					no_wait_reserve, no_wait_gpu, new_mem);
	nouveau_fence_unref(&fence);
	return ret;
}

static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 12);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0238, 2);
		OUT_RING  (chan, upper_32_bits(dst_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x030c, 6);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* line_length */
		OUT_RING  (chan, line_count);
		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0300, 1);
		OUT_RING  (chan, 0x00100110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

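/*
 * Chunking example (editor's illustration): line_count is capped at
 * 2047, so a 5000-page move is emitted as three batches of
 * 2047 + 2047 + 906 page-sized lines, with src_offset and dst_offset
 * advanced by PAGE_SIZE * line_count between batches.
 */
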
static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 length = (new_mem->num_pages << PAGE_SHIFT);
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	int ret;

	while (length) {
		u32 amount, stride, height;

		amount  = min(length, (u64)(4 * 1024 * 1024));
		stride  = 16 * 4;
		height  = amount / stride;

		if (new_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x0200, 7);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, stride);
			OUT_RING  (chan, height);
			OUT_RING  (chan, 1);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
			OUT_RING  (chan, 1);
		}
		if (old_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x021c, 7);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, stride);
			OUT_RING  (chan, height);
			OUT_RING  (chan, 1);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
			OUT_RING  (chan, 1);
		}

		ret = RING_SPACE(chan, 14);
		if (ret)
			return ret;

		BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		BEGIN_RING(chan, NvSubM2MF, 0x030c, 8);
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, stride);
		OUT_RING  (chan, stride);
		OUT_RING  (chan, stride);
		OUT_RING  (chan, height);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		length -= amount;
		src_offset += amount;
		dst_offset += amount;
	}

	return 0;
}

static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
	if (mem->mem_type == TTM_PL_TT)
		return chan->gart_handle;
	return chan->vram_handle;
}

static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	u32 src_offset = old_mem->start << PAGE_SHIFT;
	u32 dst_offset = new_mem->start << PAGE_SHIFT;
	u32 page_count = new_mem->num_pages;
	int ret;

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;

	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_RING(chan, NvSubM2MF,
				 NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING  (chan, src_offset);
		OUT_RING  (chan, dst_offset);
		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* line_length */
		OUT_RING  (chan, line_count);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
		   struct ttm_mem_reg *mem, struct nouveau_vma *vma)
{
	struct nouveau_mem *node = mem->mm_node;
	int ret;

	ret = nouveau_vm_get(chan->vm, mem->num_pages << PAGE_SHIFT,
			     node->page_shift, NV_MEM_ACCESS_RO, vma);
	if (ret)
		return ret;

	if (mem->mem_type == TTM_PL_VRAM)
		nouveau_vm_map(vma, node);
	else
		nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT, node);

	return 0;
}

static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     bool no_wait_reserve, bool no_wait_gpu,
		     struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_channel *chan;
	int ret;

	chan = nvbo->channel;
	if (!chan) {
		chan = dev_priv->channel;
		mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
	}

	/* create temporary vmas for the transfer and attach them to the
	 * old nouveau_mem node, these will get cleaned up after ttm has
	 * destroyed the ttm_mem_reg
	 */
	if (dev_priv->card_type >= NV_50) {
		struct nouveau_mem *node = old_mem->mm_node;

		ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]);
		if (ret)
			goto out;

		ret = nouveau_vma_getmap(chan, nvbo, new_mem, &node->vma[1]);
		if (ret)
			goto out;
	}

	if (dev_priv->card_type < NV_50)
		ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	else
	if (dev_priv->card_type < NV_C0)
		ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	else
		ret = nvc0_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	if (ret == 0) {
		ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
						    no_wait_reserve,
						    no_wait_gpu, new_mem);
	}

out:
	if (chan == dev_priv->channel)
		mutex_unlock(&chan->mutex);
	return ret;
}

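/*
 * Dispatch note (editor's summary): the copy path above is chosen
 * purely by card generation; for example, an NV40 takes
 * nv04_bo_move_m2mf(), an NV86 takes nv50_bo_move_m2mf(), and a Fermi
 * (NV_C0 and up) takes nvc0_bo_move_m2mf(). All three then share the
 * same fence-based cleanup via nouveau_bo_move_accel_cleanup().
 */
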
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem);
	if (ret)
		goto out;

out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

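/*
 * Editor's note on the two helpers above: a VRAM -> SYSTEM move
 * ("flipd") first blits into a GART-bound temporary so the copy
 * engine does the heavy lifting, then lets ttm_bo_move_ttm() finish
 * the cheap hop into system memory; SYSTEM -> VRAM ("flips") performs
 * the same two steps in the opposite order.
 */
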
static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_vma *vma;

	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (new_mem && new_mem->mem_type == TTM_PL_VRAM) {
			nouveau_vm_map(vma, new_mem->mm_node);
		} else
		if (new_mem && new_mem->mem_type == TTM_PL_TT &&
		    nvbo->page_shift == vma->vm->spg_shift) {
			nouveau_vm_map_sg(vma, 0,
					  new_mem->num_pages << PAGE_SHIFT,
					  new_mem->mm_node);
		} else {
			nouveau_vm_unmap(vma);
		}
	}
}

static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
		   struct nouveau_tile_reg **new_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 offset = new_mem->start << PAGE_SHIFT;

	*new_tile = NULL;
	if (new_mem->mem_type != TTM_PL_VRAM)
		return 0;

	if (dev_priv->card_type >= NV_10) {
		*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
						nvbo->tile_mode,
						nvbo->tile_flags);
	}

	return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_tile_reg *new_tile,
		      struct nouveau_tile_reg **old_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;

	nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
	*old_tile = new_tile;
}

static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait_reserve, bool no_wait_gpu,
		struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_tile_reg *new_tile = NULL;
	int ret = 0;

	if (dev_priv->card_type < NV_50) {
		ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
		if (ret)
			return ret;
	}

	/* Fake bo copy. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_mem;
		new_mem->mm_node = NULL;
		goto out;
	}

	/* Software copy if the card isn't up and running yet. */
	if (!dev_priv->channel) {
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
		goto out;
	}

	/* Hardware assisted copy. */
	if (new_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	else if (old_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	else
		ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);

	if (!ret)
		goto out;

	/* Fallback to software copy. */
	ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);

out:
	if (dev_priv->card_type < NV_50) {
		if (ret)
			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
		else
			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
	}

	return ret;
}

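/*
 * Editor's sketch of the decision tree above: a move out of SYSTEM
 * with no populated ttm is pure bookkeeping; before the kernel channel
 * exists everything goes through ttm_bo_move_memcpy(); after that the
 * blit engine is tried first, with memcpy retained as the catch-all
 * fallback.
 */
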
static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}

static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;
	int ret;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		return 0;
	case TTM_PL_TT:
#if __OS_HAS_AGP
		if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = dev_priv->gart_info.aper_base;
			mem->bus.is_iomem = true;
		}
#endif
		break;
	case TTM_PL_VRAM:
	{
		struct nouveau_mem *node = mem->mm_node;
		u8 page_shift;

		if (!dev_priv->bar1_vm) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = pci_resource_start(dev->pdev, 1);
			mem->bus.is_iomem = true;
			break;
		}

		if (dev_priv->card_type >= NV_C0)
			page_shift = node->page_shift;
		else
			page_shift = 12;

		ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size,
				     page_shift, NV_MEM_ACCESS_RW,
				     &node->bar_vma);
		if (ret)
			return ret;

		nouveau_vm_map(&node->bar_vma, node);

		mem->bus.offset = node->bar_vma.offset;
		if (dev_priv->card_type == NV_50) /*XXX*/
			mem->bus.offset -= 0x0020000000ULL;
		mem->bus.base = pci_resource_start(dev->pdev, 1);
		mem->bus.is_iomem = true;
	}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct nouveau_mem *node = mem->mm_node;

	if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM)
		return;

	if (!node->bar_vma.node)
		return;

	nouveau_vm_unmap(&node->bar_vma);
	nouveau_vm_put(&node->bar_vma);
}

static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->mem.mem_type != TTM_PL_VRAM) {
		if (dev_priv->card_type < NV_50 ||
		    !nouveau_bo_tile_layout(nvbo))
			return 0;
	}

	/* make sure bo is in mappable vram */
	if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages)
		return 0;

	nvbo->placement.fpfn = 0;
	nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
	nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0);
	return nouveau_bo_validate(nvbo, false, true, false);
}

void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
{
	struct nouveau_fence *old_fence;

	if (likely(fence))
		nouveau_fence_ref(fence);

	spin_lock(&nvbo->bo.bdev->fence_lock);
	old_fence = nvbo->bo.sync_obj;
	nvbo->bo.sync_obj = fence;
	spin_unlock(&nvbo->bo.bdev->fence_lock);

	nouveau_fence_unref(&old_fence);
}

static int
nouveau_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct drm_nouveau_private *dev_priv;
	struct drm_device *dev;
	unsigned i;
	int r;

	if (ttm->state != tt_unpopulated)
		return 0;

	dev_priv = nouveau_bdev(ttm->bdev);
	dev = dev_priv->dev;

#if __OS_HAS_AGP
	if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
		return ttm_agp_tt_populate(ttm);
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate((void *)ttm, dev->dev);
	}
#endif

	r = ttm_pool_populate(ttm);
	if (r) {
		return r;
	}

	for (i = 0; i < ttm->num_pages; i++) {
		ttm_dma->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
						       0, PAGE_SIZE,
						       PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(dev->pdev, ttm_dma->dma_address[i])) {
			/* unwind every mapping made so far, including index 0 */
			while (i--) {
				pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
				ttm_dma->dma_address[i] = 0;
			}
			ttm_pool_unpopulate(ttm);
			return -EFAULT;
		}
	}
	return 0;
}

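/*
 * Population order (editor's note): AGP-managed ttms are delegated to
 * the AGP backend, swiotlb-active systems use the coherent DMA pool,
 * and everything else takes the plain page pool followed by a
 * per-page pci_map_page() pass, unwinding the mappings on failure.
 */
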
static void
nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct drm_nouveau_private *dev_priv;
	struct drm_device *dev;
	unsigned i;

	dev_priv = nouveau_bdev(ttm->bdev);
	dev = dev_priv->dev;

#if __OS_HAS_AGP
	if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
		ttm_agp_tt_unpopulate(ttm);
		return;
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate((void *)ttm, dev->dev);
		return;
	}
#endif

	for (i = 0; i < ttm->num_pages; i++) {
		if (ttm_dma->dma_address[i]) {
			pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		}
	}

	ttm_pool_unpopulate(ttm);
}

struct ttm_bo_driver nouveau_bo_driver = {
	.ttm_tt_create = &nouveau_ttm_tt_create,
	.ttm_tt_populate = &nouveau_ttm_tt_populate,
	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move_notify = nouveau_bo_move_ntfy,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.sync_obj_signaled = __nouveau_fence_signalled,
	.sync_obj_wait = __nouveau_fence_wait,
	.sync_obj_flush = __nouveau_fence_flush,
	.sync_obj_unref = __nouveau_fence_unref,
	.sync_obj_ref = __nouveau_fence_ref,
	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
};

struct nouveau_vma *
nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nouveau_vm *vm)
{
	struct nouveau_vma *vma;
	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (vma->vm == vm)
			return vma;
	}

	return NULL;
}

int
nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
		   struct nouveau_vma *vma)
{
	const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	struct nouveau_mem *node = nvbo->bo.mem.mm_node;
	int ret;

	ret = nouveau_vm_get(vm, size, nvbo->page_shift,
			     NV_MEM_ACCESS_RW, vma);
	if (ret)
		return ret;

	if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
		nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
	else
	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		nouveau_vm_map_sg(vma, 0, size, node);

	list_add_tail(&vma->head, &nvbo->vma_list);
	vma->refcount = 1;
	return 0;
}

void
nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	if (vma->node) {
		if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM) {
			spin_lock(&nvbo->bo.bdev->fence_lock);
			ttm_bo_wait(&nvbo->bo, false, false, false);
			spin_unlock(&nvbo->bo.bdev->fence_lock);
			nouveau_vm_unmap(vma);
		}

		nouveau_vm_put(vma);
		list_del(&vma->head);
	}
}