| 1 | /* |
| 2 | * Copyright 2007 Dave Airlied |
| 3 | * All Rights Reserved. |
| 4 | * |
| 5 | * Permission is hereby granted, free of charge, to any person obtaining a |
| 6 | * copy of this software and associated documentation files (the "Software"), |
| 7 | * to deal in the Software without restriction, including without limitation |
| 8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
| 9 | * and/or sell copies of the Software, and to permit persons to whom the |
| 10 | * Software is furnished to do so, subject to the following conditions: |
| 11 | * |
| 12 | * The above copyright notice and this permission notice (including the next |
| 13 | * paragraph) shall be included in all copies or substantial portions of the |
| 14 | * Software. |
| 15 | * |
| 16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| 17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| 18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
| 19 | * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR |
| 20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
| 21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
| 22 | * OTHER DEALINGS IN THE SOFTWARE. |
| 23 | */ |
| 24 | /* |
| 25 | * Authors: Dave Airlied <airlied@linux.ie> |
| 26 | * Ben Skeggs <darktama@iinet.net.au> |
| 27 | * Jeremy Kolb <jkolb@brandeis.edu> |
| 28 | */ |
| 29 | |
| 30 | #include <core/engine.h> |
| 31 | #include <linux/swiotlb.h> |
| 32 | |
| 33 | #include <subdev/fb.h> |
| 34 | #include <subdev/vm.h> |
| 35 | #include <subdev/bar.h> |
| 36 | |
| 37 | #include "nouveau_drm.h" |
| 38 | #include "nouveau_dma.h" |
| 39 | #include "nouveau_fence.h" |
| 40 | |
| 41 | #include "nouveau_bo.h" |
| 42 | #include "nouveau_ttm.h" |
| 43 | #include "nouveau_gem.h" |
| 44 | |
| 45 | /* |
| 46 | * NV10-NV40 tiling helpers |
| 47 | */ |
| 48 | |
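/*
 * (Re)program tile region @reg: drop any fence still guarding it, tear
 * down the previous setup in the fb subdev, install the new
 * addr/size/pitch when a pitch is given, and push the result to PFB and
 * to the GR/MPEG engines so their per-region state stays in sync.
 */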
| 49 | static void |
| 50 | nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg, |
| 51 | u32 addr, u32 size, u32 pitch, u32 flags) |
| 52 | { |
| 53 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 54 | int i = reg - drm->tile.reg; |
| 55 | struct nouveau_fb *pfb = nouveau_fb(drm->device); |
| 56 | struct nouveau_fb_tile *tile = &pfb->tile.region[i]; |
| 57 | struct nouveau_engine *engine; |
| 58 | |
| 59 | nouveau_fence_unref(®->fence); |
| 60 | |
| 61 | if (tile->pitch) |
| 62 | pfb->tile.fini(pfb, i, tile); |
| 63 | |
| 64 | if (pitch) |
| 65 | pfb->tile.init(pfb, i, addr, size, pitch, flags, tile); |
| 66 | |
| 67 | pfb->tile.prog(pfb, i, tile); |
| 68 | |
| 69 | if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_GR))) |
| 70 | engine->tile_prog(engine, i); |
| 71 | if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_MPEG))) |
| 72 | engine->tile_prog(engine, i); |
| 73 | } |
| 74 | |
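/*
 * Try to claim tile region @i for a new user. The region is only handed
 * out if it is unused and any fence left by its previous owner has
 * signalled; otherwise NULL is returned.
 */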
| 75 | static struct nouveau_drm_tile * |
| 76 | nv10_bo_get_tile_region(struct drm_device *dev, int i) |
| 77 | { |
| 78 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 79 | struct nouveau_drm_tile *tile = &drm->tile.reg[i]; |
| 80 | |
| 81 | spin_lock(&drm->tile.lock); |
| 82 | |
| 83 | if (!tile->used && |
| 84 | (!tile->fence || nouveau_fence_done(tile->fence))) |
| 85 | tile->used = true; |
| 86 | else |
| 87 | tile = NULL; |
| 88 | |
| 89 | spin_unlock(&drm->tile.lock); |
| 90 | return tile; |
| 91 | } |
| 92 | |
| 93 | static void |
| 94 | nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile, |
| 95 | struct nouveau_fence *fence) |
| 96 | { |
| 97 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 98 | |
| 99 | if (tile) { |
| 100 | spin_lock(&drm->tile.lock); |
| 101 | tile->fence = nouveau_fence_ref(fence); |
| 102 | tile->used = false; |
| 103 | spin_unlock(&drm->tile.lock); |
| 104 | } |
| 105 | } |
| 106 | |
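/*
 * Pick a free tile region and program it with the requested tiling
 * parameters. Free regions that still carry a stale pitch are cleared
 * while scanning; the first claimable region is kept and, when a pitch
 * was requested, programmed via nv10_bo_update_tile_region().
 */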
| 107 | static struct nouveau_drm_tile * |
| 108 | nv10_bo_set_tiling(struct drm_device *dev, u32 addr, |
| 109 | u32 size, u32 pitch, u32 flags) |
| 110 | { |
| 111 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 112 | struct nouveau_fb *pfb = nouveau_fb(drm->device); |
| 113 | struct nouveau_drm_tile *tile, *found = NULL; |
| 114 | int i; |
| 115 | |
| 116 | for (i = 0; i < pfb->tile.regions; i++) { |
| 117 | tile = nv10_bo_get_tile_region(dev, i); |
| 118 | |
| 119 | if (pitch && !found) { |
| 120 | found = tile; |
| 121 | continue; |
| 122 | |
| 123 | } else if (tile && pfb->tile.region[i].pitch) { |
| 124 | /* Kill an unused tile region. */ |
| 125 | nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0); |
| 126 | } |
| 127 | |
| 128 | nv10_bo_put_tile_region(dev, tile, NULL); |
| 129 | } |
| 130 | |
| 131 | if (found) |
| 132 | nv10_bo_update_tile_region(dev, found, addr, size, |
| 133 | pitch, flags); |
| 134 | return found; |
| 135 | } |
| 136 | |
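/*
 * Final TTM destroy callback for a nouveau_bo: warn if the GEM object
 * or a pin reference is still live, release any NV10-NV40 tile region
 * and free the structure.
 */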
| 137 | static void |
| 138 | nouveau_bo_del_ttm(struct ttm_buffer_object *bo) |
| 139 | { |
| 140 | struct nouveau_drm *drm = nouveau_bdev(bo->bdev); |
| 141 | struct drm_device *dev = drm->dev; |
| 142 | struct nouveau_bo *nvbo = nouveau_bo(bo); |
| 143 | |
| 144 | if (unlikely(nvbo->gem.filp)) |
| 145 | DRM_ERROR("bo %p still attached to GEM object\n", bo); |
| 146 | WARN_ON(nvbo->pin_refcnt > 0); |
| 147 | nv10_bo_put_tile_region(dev, nvbo->tile, NULL); |
| 148 | kfree(nvbo); |
| 149 | } |
| 150 | |
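/*
 * Round the requested size and alignment up to what the chip needs. On
 * pre-NV50 hardware tiled buffers are padded to a chipset-dependent
 * multiple of the tile mode and given a larger alignment; on NV50 and
 * later both are rounded to the bo's page size. The size always ends up
 * a multiple of PAGE_SIZE.
 */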
| 151 | static void |
| 152 | nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags, |
| 153 | int *align, int *size) |
| 154 | { |
| 155 | struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); |
| 156 | struct nouveau_device *device = nv_device(drm->device); |
| 157 | |
| 158 | if (device->card_type < NV_50) { |
| 159 | if (nvbo->tile_mode) { |
| 160 | if (device->chipset >= 0x40) { |
| 161 | *align = 65536; |
| 162 | *size = roundup(*size, 64 * nvbo->tile_mode); |
| 163 | |
| 164 | } else if (device->chipset >= 0x30) { |
| 165 | *align = 32768; |
| 166 | *size = roundup(*size, 64 * nvbo->tile_mode); |
| 167 | |
| 168 | } else if (device->chipset >= 0x20) { |
| 169 | *align = 16384; |
| 170 | *size = roundup(*size, 64 * nvbo->tile_mode); |
| 171 | |
| 172 | } else if (device->chipset >= 0x10) { |
| 173 | *align = 16384; |
| 174 | *size = roundup(*size, 32 * nvbo->tile_mode); |
| 175 | } |
| 176 | } |
| 177 | } else { |
| 178 | *size = roundup(*size, (1 << nvbo->page_shift)); |
| 179 | *align = max((1 << nvbo->page_shift), *align); |
| 180 | } |
| 181 | |
| 182 | *size = roundup(*size, PAGE_SIZE); |
| 183 | } |
| 184 | |
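/*
 * Allocate a nouveau_bo and register it with TTM. The size is checked
 * against the largest allocation the VM's large-page shift can
 * represent, a page shift is chosen for the buffer, size/alignment are
 * fixed up and the initial placement set before ttm_bo_init() takes
 * over. On failure TTM calls nouveau_bo_del_ttm() itself, so there is
 * nothing to unwind here.
 */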
| 185 | int |
| 186 | nouveau_bo_new(struct drm_device *dev, int size, int align, |
| 187 | uint32_t flags, uint32_t tile_mode, uint32_t tile_flags, |
| 188 | struct sg_table *sg, |
| 189 | struct nouveau_bo **pnvbo) |
| 190 | { |
| 191 | struct nouveau_drm *drm = nouveau_drm(dev); |
| 192 | struct nouveau_bo *nvbo; |
| 193 | size_t acc_size; |
| 194 | int ret; |
| 195 | int type = ttm_bo_type_device; |
| 196 | int lpg_shift = 12; |
| 197 | int max_size; |
| 198 | |
| 199 | if (drm->client.base.vm) |
| 200 | lpg_shift = drm->client.base.vm->vmm->lpg_shift; |
| 201 | max_size = INT_MAX & ~((1 << lpg_shift) - 1); |
| 202 | |
| 203 | if (size <= 0 || size > max_size) { |
| 204 | nv_warn(drm, "skipped size %x\n", (u32)size); |
| 205 | return -EINVAL; |
| 206 | } |
| 207 | |
| 208 | if (sg) |
| 209 | type = ttm_bo_type_sg; |
| 210 | |
| 211 | nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL); |
| 212 | if (!nvbo) |
| 213 | return -ENOMEM; |
| 214 | INIT_LIST_HEAD(&nvbo->head); |
| 215 | INIT_LIST_HEAD(&nvbo->entry); |
| 216 | INIT_LIST_HEAD(&nvbo->vma_list); |
| 217 | nvbo->tile_mode = tile_mode; |
| 218 | nvbo->tile_flags = tile_flags; |
| 219 | nvbo->bo.bdev = &drm->ttm.bdev; |
| 220 | |
| 221 | nvbo->page_shift = 12; |
| 222 | if (drm->client.base.vm) { |
| 223 | if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024) |
| 224 | nvbo->page_shift = drm->client.base.vm->vmm->lpg_shift; |
| 225 | } |
| 226 | |
| 227 | nouveau_bo_fixup_align(nvbo, flags, &align, &size); |
| 228 | nvbo->bo.mem.num_pages = size >> PAGE_SHIFT; |
| 229 | nouveau_bo_placement_set(nvbo, flags, 0); |
| 230 | |
| 231 | acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size, |
| 232 | sizeof(struct nouveau_bo)); |
| 233 | |
| 234 | ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size, |
| 235 | type, &nvbo->placement, |
| 236 | align >> PAGE_SHIFT, false, NULL, acc_size, sg, |
| 237 | nouveau_bo_del_ttm); |
| 238 | if (ret) { |
| 239 | /* ttm will call nouveau_bo_del_ttm if it fails.. */ |
| 240 | return ret; |
| 241 | } |
| 242 | |
| 243 | *pnvbo = nvbo; |
| 244 | return 0; |
| 245 | } |
| 246 | |
| 247 | static void |
| 248 | set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags) |
| 249 | { |
| 250 | *n = 0; |
| 251 | |
| 252 | if (type & TTM_PL_FLAG_VRAM) |
| 253 | pl[(*n)++] = TTM_PL_FLAG_VRAM | flags; |
| 254 | if (type & TTM_PL_FLAG_TT) |
| 255 | pl[(*n)++] = TTM_PL_FLAG_TT | flags; |
| 256 | if (type & TTM_PL_FLAG_SYSTEM) |
| 257 | pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags; |
| 258 | } |
| 259 | |
| 260 | static void |
| 261 | set_placement_range(struct nouveau_bo *nvbo, uint32_t type) |
| 262 | { |
| 263 | struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); |
| 264 | struct nouveau_fb *pfb = nouveau_fb(drm->device); |
| 265 | u32 vram_pages = pfb->ram->size >> PAGE_SHIFT; |
| 266 | |
| 267 | if ((nv_device(drm->device)->card_type == NV_10 || |
| 268 | nv_device(drm->device)->card_type == NV_11) && |
| 269 | nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) && |
| 270 | nvbo->bo.mem.num_pages < vram_pages / 4) { |
| 271 | /* |
| 272 | * Make sure that the color and depth buffers are handled |
| 273 | * by independent memory controller units. Up to a 9x |
| 274 | * speed up when alpha-blending and depth-test are enabled |
| 275 | * at the same time. |
| 276 | */ |
| 277 | if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) { |
| 278 | nvbo->placement.fpfn = vram_pages / 2; |
| 279 | nvbo->placement.lpfn = ~0; |
| 280 | } else { |
| 281 | nvbo->placement.fpfn = 0; |
| 282 | nvbo->placement.lpfn = vram_pages / 2; |
| 283 | } |
| 284 | } |
| 285 | } |
| 286 | |
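/*
 * Build the bo's preferred and busy placement lists for the given
 * domain mask, keeping pinned buffers non-evictable and applying the
 * NV10/NV11 VRAM split implemented by set_placement_range().
 */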
| 287 | void |
| 288 | nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy) |
| 289 | { |
| 290 | struct ttm_placement *pl = &nvbo->placement; |
| 291 | uint32_t flags = TTM_PL_MASK_CACHING | |
| 292 | (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0); |
| 293 | |
| 294 | pl->placement = nvbo->placements; |
| 295 | set_placement_list(nvbo->placements, &pl->num_placement, |
| 296 | type, flags); |
| 297 | |
| 298 | pl->busy_placement = nvbo->busy_placements; |
| 299 | set_placement_list(nvbo->busy_placements, &pl->num_busy_placement, |
| 300 | type | busy, flags); |
| 301 | |
| 302 | set_placement_range(nvbo, type); |
| 303 | } |
| 304 | |
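/*
 * Pin a buffer into the requested memory type. Pinning a buffer that is
 * already pinned somewhere else fails with -EINVAL; otherwise only the
 * first pin actually validates the buffer into place and charges it
 * against the VRAM/GART accounting, later pins just bump the refcount.
 */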
| 305 | int |
| 306 | nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype) |
| 307 | { |
| 308 | struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); |
| 309 | struct ttm_buffer_object *bo = &nvbo->bo; |
| 310 | int ret; |
| 311 | |
| 312 | ret = ttm_bo_reserve(bo, false, false, false, 0); |
| 313 | if (ret) |
| 314 | goto out; |
| 315 | |
| 316 | if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) { |
| 317 | NV_ERROR(drm, "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo, |
| 318 | 1 << bo->mem.mem_type, memtype); |
| 319 | ret = -EINVAL; |
| 320 | goto out; |
| 321 | } |
| 322 | |
| 323 | if (nvbo->pin_refcnt++) |
| 324 | goto out; |
| 325 | |
| 326 | nouveau_bo_placement_set(nvbo, memtype, 0); |
| 327 | |
| 328 | ret = nouveau_bo_validate(nvbo, false, false); |
| 329 | if (ret == 0) { |
| 330 | switch (bo->mem.mem_type) { |
| 331 | case TTM_PL_VRAM: |
| 332 | drm->gem.vram_available -= bo->mem.size; |
| 333 | break; |
| 334 | case TTM_PL_TT: |
| 335 | drm->gem.gart_available -= bo->mem.size; |
| 336 | break; |
| 337 | default: |
| 338 | break; |
| 339 | } |
| 340 | } |
| 341 | out: |
| 342 | ttm_bo_unreserve(bo); |
| 343 | return ret; |
| 344 | } |
| 345 | |
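/*
 * Drop one pin reference. Only when the last reference goes away is the
 * buffer re-validated as evictable and the VRAM/GART accounting
 * restored.
 */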
| 346 | int |
| 347 | nouveau_bo_unpin(struct nouveau_bo *nvbo) |
| 348 | { |
| 349 | struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); |
| 350 | struct ttm_buffer_object *bo = &nvbo->bo; |
| 351 | int ret, ref; |
| 352 | |
| 353 | ret = ttm_bo_reserve(bo, false, false, false, 0); |
| 354 | if (ret) |
| 355 | return ret; |
| 356 | |
| 357 | ref = --nvbo->pin_refcnt; |
| 358 | WARN_ON_ONCE(ref < 0); |
| 359 | if (ref) |
| 360 | goto out; |
| 361 | |
| 362 | nouveau_bo_placement_set(nvbo, bo->mem.placement, 0); |
| 363 | |
| 364 | ret = nouveau_bo_validate(nvbo, false, false); |
| 365 | if (ret == 0) { |
| 366 | switch (bo->mem.mem_type) { |
| 367 | case TTM_PL_VRAM: |
| 368 | drm->gem.vram_available += bo->mem.size; |
| 369 | break; |
| 370 | case TTM_PL_TT: |
| 371 | drm->gem.gart_available += bo->mem.size; |
| 372 | break; |
| 373 | default: |
| 374 | break; |
| 375 | } |
| 376 | } |
| 377 | |
| 378 | out: |
| 379 | ttm_bo_unreserve(bo); |
| 380 | return ret; |
| 381 | } |
| 382 | |
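/*
 * Kernel mapping helpers. nouveau_bo_map()/nouveau_bo_unmap() wrap
 * ttm_bo_kmap()/ttm_bo_kunmap(), and the rd16/wr16/rd32/wr32 accessors
 * below read or write that mapping, switching to the io* accessors when
 * the buffer is mapped through iomem.
 */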
| 383 | int |
| 384 | nouveau_bo_map(struct nouveau_bo *nvbo) |
| 385 | { |
| 386 | int ret; |
| 387 | |
| 388 | ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0); |
| 389 | if (ret) |
| 390 | return ret; |
| 391 | |
| 392 | ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap); |
| 393 | ttm_bo_unreserve(&nvbo->bo); |
| 394 | return ret; |
| 395 | } |
| 396 | |
| 397 | void |
| 398 | nouveau_bo_unmap(struct nouveau_bo *nvbo) |
| 399 | { |
| 400 | if (nvbo) |
| 401 | ttm_bo_kunmap(&nvbo->kmap); |
| 402 | } |
| 403 | |
| 404 | int |
| 405 | nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible, |
| 406 | bool no_wait_gpu) |
| 407 | { |
| 408 | int ret; |
| 409 | |
| 410 | ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, |
| 411 | interruptible, no_wait_gpu); |
| 412 | if (ret) |
| 413 | return ret; |
| 414 | |
| 415 | return 0; |
| 416 | } |
| 417 | |
| 418 | u16 |
| 419 | nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index) |
| 420 | { |
| 421 | bool is_iomem; |
| 422 | u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem); |
| 423 | mem = &mem[index]; |
| 424 | if (is_iomem) |
| 425 | return ioread16_native((void __force __iomem *)mem); |
| 426 | else |
| 427 | return *mem; |
| 428 | } |
| 429 | |
| 430 | void |
| 431 | nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val) |
| 432 | { |
| 433 | bool is_iomem; |
| 434 | u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem); |
| 435 | mem = &mem[index]; |
| 436 | if (is_iomem) |
| 437 | iowrite16_native(val, (void __force __iomem *)mem); |
| 438 | else |
| 439 | *mem = val; |
| 440 | } |
| 441 | |
| 442 | u32 |
| 443 | nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index) |
| 444 | { |
| 445 | bool is_iomem; |
| 446 | u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem); |
| 447 | mem = &mem[index]; |
| 448 | if (is_iomem) |
| 449 | return ioread32_native((void __force __iomem *)mem); |
| 450 | else |
| 451 | return *mem; |
| 452 | } |
| 453 | |
| 454 | void |
| 455 | nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val) |
| 456 | { |
| 457 | bool is_iomem; |
| 458 | u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem); |
| 459 | mem = &mem[index]; |
| 460 | if (is_iomem) |
| 461 | iowrite32_native(val, (void __force __iomem *)mem); |
| 462 | else |
| 463 | *mem = val; |
| 464 | } |
| 465 | |
| 466 | static struct ttm_tt * |
| 467 | nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size, |
| 468 | uint32_t page_flags, struct page *dummy_read) |
| 469 | { |
| 470 | #if __OS_HAS_AGP |
| 471 | struct nouveau_drm *drm = nouveau_bdev(bdev); |
| 472 | struct drm_device *dev = drm->dev; |
| 473 | |
| 474 | if (drm->agp.stat == ENABLED) { |
| 475 | return ttm_agp_tt_create(bdev, dev->agp->bridge, size, |
| 476 | page_flags, dummy_read); |
| 477 | } |
| 478 | #endif |
| 479 | |
| 480 | return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read); |
| 481 | } |
| 482 | |
| 483 | static int |
| 484 | nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) |
| 485 | { |
| 486 | /* We'll do this from user space. */ |
| 487 | return 0; |
| 488 | } |
| 489 | |
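/*
 * Describe each TTM memory type: which manager backs it, whether it is
 * fixed/mappable and which caching modes are allowed. On NV50+ VRAM and
 * GART use nouveau's own managers; older chips fall back to TTM's
 * generic range manager, or to the nv04 GART manager for non-AGP GART.
 */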
| 490 | static int |
| 491 | nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, |
| 492 | struct ttm_mem_type_manager *man) |
| 493 | { |
| 494 | struct nouveau_drm *drm = nouveau_bdev(bdev); |
| 495 | |
| 496 | switch (type) { |
| 497 | case TTM_PL_SYSTEM: |
| 498 | man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; |
| 499 | man->available_caching = TTM_PL_MASK_CACHING; |
| 500 | man->default_caching = TTM_PL_FLAG_CACHED; |
| 501 | break; |
| 502 | case TTM_PL_VRAM: |
| 503 | if (nv_device(drm->device)->card_type >= NV_50) { |
| 504 | man->func = &nouveau_vram_manager; |
| 505 | man->io_reserve_fastpath = false; |
| 506 | man->use_io_reserve_lru = true; |
| 507 | } else { |
| 508 | man->func = &ttm_bo_manager_func; |
| 509 | } |
| 510 | man->flags = TTM_MEMTYPE_FLAG_FIXED | |
| 511 | TTM_MEMTYPE_FLAG_MAPPABLE; |
| 512 | man->available_caching = TTM_PL_FLAG_UNCACHED | |
| 513 | TTM_PL_FLAG_WC; |
| 514 | man->default_caching = TTM_PL_FLAG_WC; |
| 515 | break; |
| 516 | case TTM_PL_TT: |
| 517 | if (nv_device(drm->device)->card_type >= NV_50) |
| 518 | man->func = &nouveau_gart_manager; |
| 519 | else |
| 520 | if (drm->agp.stat != ENABLED) |
| 521 | man->func = &nv04_gart_manager; |
| 522 | else |
| 523 | man->func = &ttm_bo_manager_func; |
| 524 | |
| 525 | if (drm->agp.stat == ENABLED) { |
| 526 | man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; |
| 527 | man->available_caching = TTM_PL_FLAG_UNCACHED | |
| 528 | TTM_PL_FLAG_WC; |
| 529 | man->default_caching = TTM_PL_FLAG_WC; |
| 530 | } else { |
| 531 | man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | |
| 532 | TTM_MEMTYPE_FLAG_CMA; |
| 533 | man->available_caching = TTM_PL_MASK_CACHING; |
| 534 | man->default_caching = TTM_PL_FLAG_CACHED; |
| 535 | } |
| 536 | |
| 537 | break; |
| 538 | default: |
| 539 | return -EINVAL; |
| 540 | } |
| 541 | return 0; |
| 542 | } |
| 543 | |
| 544 | static void |
| 545 | nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl) |
| 546 | { |
| 547 | struct nouveau_bo *nvbo = nouveau_bo(bo); |
| 548 | |
| 549 | switch (bo->mem.mem_type) { |
| 550 | case TTM_PL_VRAM: |
| 551 | nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT, |
| 552 | TTM_PL_FLAG_SYSTEM); |
| 553 | break; |
| 554 | default: |
| 555 | nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0); |
| 556 | break; |
| 557 | } |
| 558 | |
| 559 | *pl = nvbo->placement; |
| 560 | } |
| 561 | |
| 562 | |
| 563 | /* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access |
| 564 | * TTM_PL_{VRAM,TT} directly. |
| 565 | */ |
| 566 | |
| 567 | static int |
| 568 | nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan, |
| 569 | struct nouveau_bo *nvbo, bool evict, |
| 570 | bool no_wait_gpu, struct ttm_mem_reg *new_mem) |
| 571 | { |
| 572 | struct nouveau_fence *fence = NULL; |
| 573 | int ret; |
| 574 | |
| 575 | ret = nouveau_fence_new(chan, false, &fence); |
| 576 | if (ret) |
| 577 | return ret; |
| 578 | |
| 579 | ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, evict, |
| 580 | no_wait_gpu, new_mem); |
| 581 | nouveau_fence_unref(&fence); |
| 582 | return ret; |
| 583 | } |
| 584 | |
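/*
 * Per-generation copy back-ends. Each *_bo_move_init() binds the copy
 * class to its channel once, and the matching *_move_copy()/_m2mf()/
 * _exec() hook emits the commands for a single buffer move. Which pair
 * gets used is decided by nouveau_bo_move_init() further down.
 */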
| 585 | static int |
| 586 | nve0_bo_move_init(struct nouveau_channel *chan, u32 handle) |
| 587 | { |
| 588 | int ret = RING_SPACE(chan, 2); |
| 589 | if (ret == 0) { |
| 590 | BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1); |
| 591 | OUT_RING (chan, handle & 0x0000ffff); |
| 592 | FIRE_RING (chan); |
| 593 | } |
| 594 | return ret; |
| 595 | } |
| 596 | |
| 597 | static int |
| 598 | nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo, |
| 599 | struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) |
| 600 | { |
| 601 | struct nouveau_mem *node = old_mem->mm_node; |
| 602 | int ret = RING_SPACE(chan, 10); |
| 603 | if (ret == 0) { |
| 604 | BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8); |
| 605 | OUT_RING (chan, upper_32_bits(node->vma[0].offset)); |
| 606 | OUT_RING (chan, lower_32_bits(node->vma[0].offset)); |
| 607 | OUT_RING (chan, upper_32_bits(node->vma[1].offset)); |
| 608 | OUT_RING (chan, lower_32_bits(node->vma[1].offset)); |
| 609 | OUT_RING (chan, PAGE_SIZE); |
| 610 | OUT_RING (chan, PAGE_SIZE); |
| 611 | OUT_RING (chan, PAGE_SIZE); |
| 612 | OUT_RING (chan, new_mem->num_pages); |
| 613 | BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386); |
| 614 | } |
| 615 | return ret; |
| 616 | } |
| 617 | |
| 618 | static int |
| 619 | nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle) |
| 620 | { |
| 621 | int ret = RING_SPACE(chan, 2); |
| 622 | if (ret == 0) { |
| 623 | BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1); |
| 624 | OUT_RING (chan, handle); |
| 625 | } |
| 626 | return ret; |
| 627 | } |
| 628 | |
| 629 | static int |
| 630 | nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo, |
| 631 | struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) |
| 632 | { |
| 633 | struct nouveau_mem *node = old_mem->mm_node; |
| 634 | u64 src_offset = node->vma[0].offset; |
| 635 | u64 dst_offset = node->vma[1].offset; |
| 636 | u32 page_count = new_mem->num_pages; |
| 637 | int ret; |
| 638 | |
| 639 | page_count = new_mem->num_pages; |
| 640 | while (page_count) { |
| 641 | int line_count = (page_count > 8191) ? 8191 : page_count; |
| 642 | |
| 643 | ret = RING_SPACE(chan, 11); |
| 644 | if (ret) |
| 645 | return ret; |
| 646 | |
| 647 | BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8); |
| 648 | OUT_RING (chan, upper_32_bits(src_offset)); |
| 649 | OUT_RING (chan, lower_32_bits(src_offset)); |
| 650 | OUT_RING (chan, upper_32_bits(dst_offset)); |
| 651 | OUT_RING (chan, lower_32_bits(dst_offset)); |
| 652 | OUT_RING (chan, PAGE_SIZE); |
| 653 | OUT_RING (chan, PAGE_SIZE); |
| 654 | OUT_RING (chan, PAGE_SIZE); |
| 655 | OUT_RING (chan, line_count); |
| 656 | BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1); |
| 657 | OUT_RING (chan, 0x00000110); |
| 658 | |
| 659 | page_count -= line_count; |
| 660 | src_offset += (PAGE_SIZE * line_count); |
| 661 | dst_offset += (PAGE_SIZE * line_count); |
| 662 | } |
| 663 | |
| 664 | return 0; |
| 665 | } |
| 666 | |
| 667 | static int |
| 668 | nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, |
| 669 | struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) |
| 670 | { |
| 671 | struct nouveau_mem *node = old_mem->mm_node; |
| 672 | u64 src_offset = node->vma[0].offset; |
| 673 | u64 dst_offset = node->vma[1].offset; |
| 674 | u32 page_count = new_mem->num_pages; |
| 675 | int ret; |
| 676 | |
| 677 | page_count = new_mem->num_pages; |
| 678 | while (page_count) { |
| 679 | int line_count = (page_count > 2047) ? 2047 : page_count; |
| 680 | |
| 681 | ret = RING_SPACE(chan, 12); |
| 682 | if (ret) |
| 683 | return ret; |
| 684 | |
| 685 | BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2); |
| 686 | OUT_RING (chan, upper_32_bits(dst_offset)); |
| 687 | OUT_RING (chan, lower_32_bits(dst_offset)); |
| 688 | BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6); |
| 689 | OUT_RING (chan, upper_32_bits(src_offset)); |
| 690 | OUT_RING (chan, lower_32_bits(src_offset)); |
| 691 | OUT_RING (chan, PAGE_SIZE); /* src_pitch */ |
| 692 | OUT_RING (chan, PAGE_SIZE); /* dst_pitch */ |
| 693 | OUT_RING (chan, PAGE_SIZE); /* line_length */ |
| 694 | OUT_RING (chan, line_count); |
| 695 | BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1); |
| 696 | OUT_RING (chan, 0x00100110); |
| 697 | |
| 698 | page_count -= line_count; |
| 699 | src_offset += (PAGE_SIZE * line_count); |
| 700 | dst_offset += (PAGE_SIZE * line_count); |
| 701 | } |
| 702 | |
| 703 | return 0; |
| 704 | } |
| 705 | |
| 706 | static int |
| 707 | nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo, |
| 708 | struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) |
| 709 | { |
| 710 | struct nouveau_mem *node = old_mem->mm_node; |
| 711 | u64 src_offset = node->vma[0].offset; |
| 712 | u64 dst_offset = node->vma[1].offset; |
| 713 | u32 page_count = new_mem->num_pages; |
| 714 | int ret; |
| 715 | |
| 716 | page_count = new_mem->num_pages; |
| 717 | while (page_count) { |
| 718 | int line_count = (page_count > 8191) ? 8191 : page_count; |
| 719 | |
| 720 | ret = RING_SPACE(chan, 11); |
| 721 | if (ret) |
| 722 | return ret; |
| 723 | |
| 724 | BEGIN_NV04(chan, NvSubCopy, 0x030c, 8); |
| 725 | OUT_RING (chan, upper_32_bits(src_offset)); |
| 726 | OUT_RING (chan, lower_32_bits(src_offset)); |
| 727 | OUT_RING (chan, upper_32_bits(dst_offset)); |
| 728 | OUT_RING (chan, lower_32_bits(dst_offset)); |
| 729 | OUT_RING (chan, PAGE_SIZE); |
| 730 | OUT_RING (chan, PAGE_SIZE); |
| 731 | OUT_RING (chan, PAGE_SIZE); |
| 732 | OUT_RING (chan, line_count); |
| 733 | BEGIN_NV04(chan, NvSubCopy, 0x0300, 1); |
| 734 | OUT_RING (chan, 0x00000110); |
| 735 | |
| 736 | page_count -= line_count; |
| 737 | src_offset += (PAGE_SIZE * line_count); |
| 738 | dst_offset += (PAGE_SIZE * line_count); |
| 739 | } |
| 740 | |
| 741 | return 0; |
| 742 | } |
| 743 | |
| 744 | static int |
| 745 | nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo, |
| 746 | struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) |
| 747 | { |
| 748 | struct nouveau_mem *node = old_mem->mm_node; |
| 749 | int ret = RING_SPACE(chan, 7); |
| 750 | if (ret == 0) { |
| 751 | BEGIN_NV04(chan, NvSubCopy, 0x0320, 6); |
| 752 | OUT_RING (chan, upper_32_bits(node->vma[0].offset)); |
| 753 | OUT_RING (chan, lower_32_bits(node->vma[0].offset)); |
| 754 | OUT_RING (chan, upper_32_bits(node->vma[1].offset)); |
| 755 | OUT_RING (chan, lower_32_bits(node->vma[1].offset)); |
| 756 | OUT_RING (chan, 0x00000000 /* COPY */); |
| 757 | OUT_RING (chan, new_mem->num_pages << PAGE_SHIFT); |
| 758 | } |
| 759 | return ret; |
| 760 | } |
| 761 | |
| 762 | static int |
| 763 | nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo, |
| 764 | struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) |
| 765 | { |
| 766 | struct nouveau_mem *node = old_mem->mm_node; |
| 767 | int ret = RING_SPACE(chan, 7); |
| 768 | if (ret == 0) { |
| 769 | BEGIN_NV04(chan, NvSubCopy, 0x0304, 6); |
| 770 | OUT_RING (chan, new_mem->num_pages << PAGE_SHIFT); |
| 771 | OUT_RING (chan, upper_32_bits(node->vma[0].offset)); |
| 772 | OUT_RING (chan, lower_32_bits(node->vma[0].offset)); |
| 773 | OUT_RING (chan, upper_32_bits(node->vma[1].offset)); |
| 774 | OUT_RING (chan, lower_32_bits(node->vma[1].offset)); |
| 775 | OUT_RING (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */); |
| 776 | } |
| 777 | return ret; |
| 778 | } |
| 779 | |
| 780 | static int |
| 781 | nv50_bo_move_init(struct nouveau_channel *chan, u32 handle) |
| 782 | { |
| 783 | int ret = RING_SPACE(chan, 6); |
| 784 | if (ret == 0) { |
| 785 | BEGIN_NV04(chan, NvSubCopy, 0x0000, 1); |
| 786 | OUT_RING (chan, handle); |
| 787 | BEGIN_NV04(chan, NvSubCopy, 0x0180, 3); |
| 788 | OUT_RING (chan, NvNotify0); |
| 789 | OUT_RING (chan, NvDmaFB); |
| 790 | OUT_RING (chan, NvDmaFB); |
| 791 | } |
| 792 | |
| 793 | return ret; |
| 794 | } |
| 795 | |
| 796 | static int |
| 797 | nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, |
| 798 | struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) |
| 799 | { |
| 800 | struct nouveau_mem *node = old_mem->mm_node; |
| 801 | u64 length = (new_mem->num_pages << PAGE_SHIFT); |
| 802 | u64 src_offset = node->vma[0].offset; |
| 803 | u64 dst_offset = node->vma[1].offset; |
| 804 | int src_tiled = !!node->memtype; |
| 805 | int dst_tiled = !!((struct nouveau_mem *)new_mem->mm_node)->memtype; |
| 806 | int ret; |
| 807 | |
| 808 | while (length) { |
| 809 | u32 amount, stride, height; |
| 810 | |
| 811 | ret = RING_SPACE(chan, 18 + 6 * (src_tiled + dst_tiled)); |
| 812 | if (ret) |
| 813 | return ret; |
| 814 | |
| 815 | amount = min(length, (u64)(4 * 1024 * 1024)); |
| 816 | stride = 16 * 4; |
| 817 | height = amount / stride; |
| 818 | |
| 819 | if (src_tiled) { |
| 820 | BEGIN_NV04(chan, NvSubCopy, 0x0200, 7); |
| 821 | OUT_RING (chan, 0); |
| 822 | OUT_RING (chan, 0); |
| 823 | OUT_RING (chan, stride); |
| 824 | OUT_RING (chan, height); |
| 825 | OUT_RING (chan, 1); |
| 826 | OUT_RING (chan, 0); |
| 827 | OUT_RING (chan, 0); |
| 828 | } else { |
| 829 | BEGIN_NV04(chan, NvSubCopy, 0x0200, 1); |
| 830 | OUT_RING (chan, 1); |
| 831 | } |
| 832 | if (dst_tiled) { |
| 833 | BEGIN_NV04(chan, NvSubCopy, 0x021c, 7); |
| 834 | OUT_RING (chan, 0); |
| 835 | OUT_RING (chan, 0); |
| 836 | OUT_RING (chan, stride); |
| 837 | OUT_RING (chan, height); |
| 838 | OUT_RING (chan, 1); |
| 839 | OUT_RING (chan, 0); |
| 840 | OUT_RING (chan, 0); |
| 841 | } else { |
| 842 | BEGIN_NV04(chan, NvSubCopy, 0x021c, 1); |
| 843 | OUT_RING (chan, 1); |
| 844 | } |
| 845 | |
| 846 | BEGIN_NV04(chan, NvSubCopy, 0x0238, 2); |
| 847 | OUT_RING (chan, upper_32_bits(src_offset)); |
| 848 | OUT_RING (chan, upper_32_bits(dst_offset)); |
| 849 | BEGIN_NV04(chan, NvSubCopy, 0x030c, 8); |
| 850 | OUT_RING (chan, lower_32_bits(src_offset)); |
| 851 | OUT_RING (chan, lower_32_bits(dst_offset)); |
| 852 | OUT_RING (chan, stride); |
| 853 | OUT_RING (chan, stride); |
| 854 | OUT_RING (chan, stride); |
| 855 | OUT_RING (chan, height); |
| 856 | OUT_RING (chan, 0x00000101); |
| 857 | OUT_RING (chan, 0x00000000); |
| 858 | BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1); |
| 859 | OUT_RING (chan, 0); |
| 860 | |
| 861 | length -= amount; |
| 862 | src_offset += amount; |
| 863 | dst_offset += amount; |
| 864 | } |
| 865 | |
| 866 | return 0; |
| 867 | } |
| 868 | |
| 869 | static int |
| 870 | nv04_bo_move_init(struct nouveau_channel *chan, u32 handle) |
| 871 | { |
| 872 | int ret = RING_SPACE(chan, 4); |
| 873 | if (ret == 0) { |
| 874 | BEGIN_NV04(chan, NvSubCopy, 0x0000, 1); |
| 875 | OUT_RING (chan, handle); |
| 876 | BEGIN_NV04(chan, NvSubCopy, 0x0180, 1); |
| 877 | OUT_RING (chan, NvNotify0); |
| 878 | } |
| 879 | |
| 880 | return ret; |
| 881 | } |
| 882 | |
| 883 | static inline uint32_t |
| 884 | nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo, |
| 885 | struct nouveau_channel *chan, struct ttm_mem_reg *mem) |
| 886 | { |
| 887 | if (mem->mem_type == TTM_PL_TT) |
| 888 | return NvDmaTT; |
| 889 | return NvDmaFB; |
| 890 | } |
| 891 | |
| 892 | static int |
| 893 | nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, |
| 894 | struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) |
| 895 | { |
| 896 | u32 src_offset = old_mem->start << PAGE_SHIFT; |
| 897 | u32 dst_offset = new_mem->start << PAGE_SHIFT; |
| 898 | u32 page_count = new_mem->num_pages; |
| 899 | int ret; |
| 900 | |
| 901 | ret = RING_SPACE(chan, 3); |
| 902 | if (ret) |
| 903 | return ret; |
| 904 | |
| 905 | BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2); |
| 906 | OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem)); |
| 907 | OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem)); |
| 908 | |
| 909 | page_count = new_mem->num_pages; |
| 910 | while (page_count) { |
| 911 | int line_count = (page_count > 2047) ? 2047 : page_count; |
| 912 | |
| 913 | ret = RING_SPACE(chan, 11); |
| 914 | if (ret) |
| 915 | return ret; |
| 916 | |
| 917 | BEGIN_NV04(chan, NvSubCopy, |
| 918 | NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8); |
| 919 | OUT_RING (chan, src_offset); |
| 920 | OUT_RING (chan, dst_offset); |
| 921 | OUT_RING (chan, PAGE_SIZE); /* src_pitch */ |
| 922 | OUT_RING (chan, PAGE_SIZE); /* dst_pitch */ |
| 923 | OUT_RING (chan, PAGE_SIZE); /* line_length */ |
| 924 | OUT_RING (chan, line_count); |
| 925 | OUT_RING (chan, 0x00000101); |
| 926 | OUT_RING (chan, 0x00000000); |
| 927 | BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1); |
| 928 | OUT_RING (chan, 0); |
| 929 | |
| 930 | page_count -= line_count; |
| 931 | src_offset += (PAGE_SIZE * line_count); |
| 932 | dst_offset += (PAGE_SIZE * line_count); |
| 933 | } |
| 934 | |
| 935 | return 0; |
| 936 | } |
| 937 | |
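/*
 * Map the source and destination backing store into the client VM so
 * the copy engine can address both sides of the move. The two temporary
 * VMAs are attached to the old node and torn down later, once TTM has
 * destroyed the ttm_mem_reg (see the comment in nouveau_bo_move_m2mf()).
 */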
| 938 | static int |
| 939 | nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo, |
| 940 | struct ttm_mem_reg *mem) |
| 941 | { |
| 942 | struct nouveau_mem *old_node = bo->mem.mm_node; |
| 943 | struct nouveau_mem *new_node = mem->mm_node; |
| 944 | u64 size = (u64)mem->num_pages << PAGE_SHIFT; |
| 945 | int ret; |
| 946 | |
| 947 | ret = nouveau_vm_get(nv_client(drm)->vm, size, old_node->page_shift, |
| 948 | NV_MEM_ACCESS_RW, &old_node->vma[0]); |
| 949 | if (ret) |
| 950 | return ret; |
| 951 | |
| 952 | ret = nouveau_vm_get(nv_client(drm)->vm, size, new_node->page_shift, |
| 953 | NV_MEM_ACCESS_RW, &old_node->vma[1]); |
| 954 | if (ret) { |
| 955 | nouveau_vm_put(&old_node->vma[0]); |
| 956 | return ret; |
| 957 | } |
| 958 | |
| 959 | nouveau_vm_map(&old_node->vma[0], old_node); |
| 960 | nouveau_vm_map(&old_node->vma[1], new_node); |
| 961 | return 0; |
| 962 | } |
| 963 | |
| 964 | static int |
| 965 | nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, |
| 966 | bool no_wait_gpu, struct ttm_mem_reg *new_mem) |
| 967 | { |
| 968 | struct nouveau_drm *drm = nouveau_bdev(bo->bdev); |
| 969 | struct nouveau_channel *chan = drm->ttm.chan; |
| 970 | int ret; |
| 971 | |
| 972 | /* create temporary vmas for the transfer and attach them to the |
| 973 | * old nouveau_mem node; these will get cleaned up after ttm has |
| 974 | * destroyed the ttm_mem_reg |
| 975 | */ |
| 976 | if (nv_device(drm->device)->card_type >= NV_50) { |
| 977 | ret = nouveau_bo_move_prep(drm, bo, new_mem); |
| 978 | if (ret) |
| 979 | return ret; |
| 980 | } |
| 981 | |
| 982 | mutex_lock_nested(&chan->cli->mutex, SINGLE_DEPTH_NESTING); |
| 983 | |
| 984 | ret = drm->ttm.move(chan, bo, &bo->mem, new_mem); |
| 985 | if (ret == 0) { |
| 986 | struct nouveau_bo *nvbo = nouveau_bo(bo); |
| 987 | ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict, |
| 988 | no_wait_gpu, new_mem); |
| 989 | } |
| 990 | |
| 991 | mutex_unlock(&chan->cli->mutex); |
| 992 | return ret; |
| 993 | } |
| 994 | |
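/*
 * Choose the engine used for accelerated buffer moves. The table is
 * ordered newest to oldest and the first class that can be allocated
 * and initialised on a channel wins; entries past the empty terminator
 * (the 0x88b4 CRYPT method) are never considered. Without any usable
 * engine the driver falls back to CPU copies.
 */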
| 995 | void |
| 996 | nouveau_bo_move_init(struct nouveau_drm *drm) |
| 997 | { |
| 998 | static const struct { |
| 999 | const char *name; |
| 1000 | int engine; |
| 1001 | u32 oclass; |
| 1002 | int (*exec)(struct nouveau_channel *, |
| 1003 | struct ttm_buffer_object *, |
| 1004 | struct ttm_mem_reg *, struct ttm_mem_reg *); |
| 1005 | int (*init)(struct nouveau_channel *, u32 handle); |
| 1006 | } _methods[] = { |
| 1007 | { "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init }, |
| 1008 | { "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init }, |
| 1009 | { "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init }, |
| 1010 | { "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init }, |
| 1011 | { "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init }, |
| 1012 | { "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init }, |
| 1013 | { "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init }, |
| 1014 | { "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init }, |
| 1015 | { "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init }, |
| 1016 | {}, |
| 1017 | { "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init }, |
| 1018 | }, *mthd = _methods; |
| 1019 | const char *name = "CPU"; |
| 1020 | int ret; |
| 1021 | |
| 1022 | do { |
| 1023 | struct nouveau_object *object; |
| 1024 | struct nouveau_channel *chan; |
| 1025 | u32 handle = (mthd->engine << 16) | mthd->oclass; |
| 1026 | |
| 1027 | if (mthd->engine) |
| 1028 | chan = drm->cechan; |
| 1029 | else |
| 1030 | chan = drm->channel; |
| 1031 | if (chan == NULL) |
| 1032 | continue; |
| 1033 | |
| 1034 | ret = nouveau_object_new(nv_object(drm), chan->handle, handle, |
| 1035 | mthd->oclass, NULL, 0, &object); |
| 1036 | if (ret == 0) { |
| 1037 | ret = mthd->init(chan, handle); |
| 1038 | if (ret) { |
| 1039 | nouveau_object_del(nv_object(drm), |
| 1040 | chan->handle, handle); |
| 1041 | continue; |
| 1042 | } |
| 1043 | |
| 1044 | drm->ttm.move = mthd->exec; |
| 1045 | drm->ttm.chan = chan; |
| 1046 | name = mthd->name; |
| 1047 | break; |
| 1048 | } |
| 1049 | } while ((++mthd)->exec); |
| 1050 | |
| 1051 | NV_INFO(drm, "MM: using %s for buffer copies\n", name); |
| 1052 | } |
| 1053 | |
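/*
 * VRAM -> system move: copy the data into a temporary GART (TT)
 * placement with the copy engine first, then let ttm_bo_move_ttm()
 * finish the trip to system memory.
 */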
| 1054 | static int |
| 1055 | nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, |
| 1056 | bool no_wait_gpu, struct ttm_mem_reg *new_mem) |
| 1057 | { |
| 1058 | u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; |
| 1059 | struct ttm_placement placement; |
| 1060 | struct ttm_mem_reg tmp_mem; |
| 1061 | int ret; |
| 1062 | |
| 1063 | placement.fpfn = placement.lpfn = 0; |
| 1064 | placement.num_placement = placement.num_busy_placement = 1; |
| 1065 | placement.placement = placement.busy_placement = &placement_memtype; |
| 1066 | |
| 1067 | tmp_mem = *new_mem; |
| 1068 | tmp_mem.mm_node = NULL; |
| 1069 | ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu); |
| 1070 | if (ret) |
| 1071 | return ret; |
| 1072 | |
| 1073 | ret = ttm_tt_bind(bo->ttm, &tmp_mem); |
| 1074 | if (ret) |
| 1075 | goto out; |
| 1076 | |
| 1077 | ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_mem); |
| 1078 | if (ret) |
| 1079 | goto out; |
| 1080 | |
| 1081 | ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem); |
| 1082 | out: |
| 1083 | ttm_bo_mem_put(bo, &tmp_mem); |
| 1084 | return ret; |
| 1085 | } |
| 1086 | |
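/*
 * System -> VRAM move: the mirror of nouveau_bo_move_flipd(). The
 * buffer is first bound into a temporary GART placement and then copied
 * to its final location with the copy engine.
 */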
| 1087 | static int |
| 1088 | nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, |
| 1089 | bool no_wait_gpu, struct ttm_mem_reg *new_mem) |
| 1090 | { |
| 1091 | u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; |
| 1092 | struct ttm_placement placement; |
| 1093 | struct ttm_mem_reg tmp_mem; |
| 1094 | int ret; |
| 1095 | |
| 1096 | placement.fpfn = placement.lpfn = 0; |
| 1097 | placement.num_placement = placement.num_busy_placement = 1; |
| 1098 | placement.placement = placement.busy_placement = &placement_memtype; |
| 1099 | |
| 1100 | tmp_mem = *new_mem; |
| 1101 | tmp_mem.mm_node = NULL; |
| 1102 | ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu); |
| 1103 | if (ret) |
| 1104 | return ret; |
| 1105 | |
| 1106 | ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem); |
| 1107 | if (ret) |
| 1108 | goto out; |
| 1109 | |
| 1110 | ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_mem); |
| 1111 | if (ret) |
| 1112 | goto out; |
| 1113 | |
| 1114 | out: |
| 1115 | ttm_bo_mem_put(bo, &tmp_mem); |
| 1116 | return ret; |
| 1117 | } |
| 1118 | |
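/*
 * Move notification: walk every VMA mapping this bo and either remap it
 * to the new backing store or unmap it when it cannot stay mapped
 * (system memory, or GART when the bo uses the VM's large page size).
 */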
| 1119 | static void |
| 1120 | nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem) |
| 1121 | { |
| 1122 | struct nouveau_bo *nvbo = nouveau_bo(bo); |
| 1123 | struct nouveau_vma *vma; |
| 1124 | |
| 1125 | /* ttm can now (stupidly) pass the driver bos it didn't create... */ |
| 1126 | if (bo->destroy != nouveau_bo_del_ttm) |
| 1127 | return; |
| 1128 | |
| 1129 | list_for_each_entry(vma, &nvbo->vma_list, head) { |
| 1130 | if (new_mem && new_mem->mem_type != TTM_PL_SYSTEM && |
| 1131 | (new_mem->mem_type == TTM_PL_VRAM || |
| 1132 | nvbo->page_shift != vma->vm->vmm->lpg_shift)) { |
| 1133 | nouveau_vm_map(vma, new_mem->mm_node); |
| 1134 | } else { |
| 1135 | nouveau_vm_unmap(vma); |
| 1136 | } |
| 1137 | } |
| 1138 | } |
| 1139 | |
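/*
 * Pre-NV50 tiling bookkeeping around a move: nouveau_bo_vm_bind() grabs
 * a tile region for the new VRAM location, nouveau_bo_vm_cleanup()
 * retires the previous one against the bo's current fence.
 */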
| 1140 | static int |
| 1141 | nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem, |
| 1142 | struct nouveau_drm_tile **new_tile) |
| 1143 | { |
| 1144 | struct nouveau_drm *drm = nouveau_bdev(bo->bdev); |
| 1145 | struct drm_device *dev = drm->dev; |
| 1146 | struct nouveau_bo *nvbo = nouveau_bo(bo); |
| 1147 | u64 offset = new_mem->start << PAGE_SHIFT; |
| 1148 | |
| 1149 | *new_tile = NULL; |
| 1150 | if (new_mem->mem_type != TTM_PL_VRAM) |
| 1151 | return 0; |
| 1152 | |
| 1153 | if (nv_device(drm->device)->card_type >= NV_10) { |
| 1154 | *new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size, |
| 1155 | nvbo->tile_mode, |
| 1156 | nvbo->tile_flags); |
| 1157 | } |
| 1158 | |
| 1159 | return 0; |
| 1160 | } |
| 1161 | |
| 1162 | static void |
| 1163 | nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo, |
| 1164 | struct nouveau_drm_tile *new_tile, |
| 1165 | struct nouveau_drm_tile **old_tile) |
| 1166 | { |
| 1167 | struct nouveau_drm *drm = nouveau_bdev(bo->bdev); |
| 1168 | struct drm_device *dev = drm->dev; |
| 1169 | |
| 1170 | nv10_bo_put_tile_region(dev, *old_tile, bo->sync_obj); |
| 1171 | *old_tile = new_tile; |
| 1172 | } |
| 1173 | |
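/*
 * Main TTM move callback. Handles the trivial "no backing pages yet"
 * case, falls back to memcpy when no copy engine was set up, and
 * otherwise routes the move through the flipd/flips/m2mf helpers above,
 * again falling back to memcpy if the accelerated path fails. Pre-NV50
 * tile region state is updated around the move.
 */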
| 1174 | static int |
| 1175 | nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, |
| 1176 | bool no_wait_gpu, struct ttm_mem_reg *new_mem) |
| 1177 | { |
| 1178 | struct nouveau_drm *drm = nouveau_bdev(bo->bdev); |
| 1179 | struct nouveau_bo *nvbo = nouveau_bo(bo); |
| 1180 | struct ttm_mem_reg *old_mem = &bo->mem; |
| 1181 | struct nouveau_drm_tile *new_tile = NULL; |
| 1182 | int ret = 0; |
| 1183 | |
| 1184 | if (nv_device(drm->device)->card_type < NV_50) { |
| 1185 | ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile); |
| 1186 | if (ret) |
| 1187 | return ret; |
| 1188 | } |
| 1189 | |
| 1190 | /* Fake bo copy. */ |
| 1191 | if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) { |
| 1192 | BUG_ON(bo->mem.mm_node != NULL); |
| 1193 | bo->mem = *new_mem; |
| 1194 | new_mem->mm_node = NULL; |
| 1195 | goto out; |
| 1196 | } |
| 1197 | |
| 1198 | /* CPU copy if we have no accelerated method available */ |
| 1199 | if (!drm->ttm.move) { |
| 1200 | ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); |
| 1201 | goto out; |
| 1202 | } |
| 1203 | |
| 1204 | /* Hardware assisted copy. */ |
| 1205 | if (new_mem->mem_type == TTM_PL_SYSTEM) |
| 1206 | ret = nouveau_bo_move_flipd(bo, evict, intr, |
| 1207 | no_wait_gpu, new_mem); |
| 1208 | else if (old_mem->mem_type == TTM_PL_SYSTEM) |
| 1209 | ret = nouveau_bo_move_flips(bo, evict, intr, |
| 1210 | no_wait_gpu, new_mem); |
| 1211 | else |
| 1212 | ret = nouveau_bo_move_m2mf(bo, evict, intr, |
| 1213 | no_wait_gpu, new_mem); |
| 1214 | |
| 1215 | if (!ret) |
| 1216 | goto out; |
| 1217 | |
| 1218 | /* Fallback to software copy. */ |
| 1219 | ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); |
| 1220 | |
| 1221 | out: |
| 1222 | if (nv_device(drm->device)->card_type < NV_50) { |
| 1223 | if (ret) |
| 1224 | nouveau_bo_vm_cleanup(bo, NULL, &new_tile); |
| 1225 | else |
| 1226 | nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile); |
| 1227 | } |
| 1228 | |
| 1229 | return ret; |
| 1230 | } |
| 1231 | |
| 1232 | static int |
| 1233 | nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp) |
| 1234 | { |
| 1235 | struct nouveau_bo *nvbo = nouveau_bo(bo); |
| 1236 | |
| 1237 | return drm_vma_node_verify_access(&nvbo->gem.vma_node, filp); |
| 1238 | } |
| 1239 | |
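/*
 * Tell TTM how the CPU reaches a memory region: plain system memory and
 * untiled GART need no io mapping, AGP uses its aperture, and VRAM (or
 * tiled memory) goes through BAR1, with NV50+ first asking the bar
 * subdev for a mapping.
 */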
| 1240 | static int |
| 1241 | nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) |
| 1242 | { |
| 1243 | struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; |
| 1244 | struct nouveau_drm *drm = nouveau_bdev(bdev); |
| 1245 | struct nouveau_mem *node = mem->mm_node; |
| 1246 | struct drm_device *dev = drm->dev; |
| 1247 | int ret; |
| 1248 | |
| 1249 | mem->bus.addr = NULL; |
| 1250 | mem->bus.offset = 0; |
| 1251 | mem->bus.size = mem->num_pages << PAGE_SHIFT; |
| 1252 | mem->bus.base = 0; |
| 1253 | mem->bus.is_iomem = false; |
| 1254 | if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) |
| 1255 | return -EINVAL; |
| 1256 | switch (mem->mem_type) { |
| 1257 | case TTM_PL_SYSTEM: |
| 1258 | /* System memory */ |
| 1259 | return 0; |
| 1260 | case TTM_PL_TT: |
| 1261 | #if __OS_HAS_AGP |
| 1262 | if (drm->agp.stat == ENABLED) { |
| 1263 | mem->bus.offset = mem->start << PAGE_SHIFT; |
| 1264 | mem->bus.base = drm->agp.base; |
| 1265 | mem->bus.is_iomem = !dev->agp->cant_use_aperture; |
| 1266 | } |
| 1267 | #endif |
| 1268 | if (!node->memtype) |
| 1269 | /* untiled */ |
| 1270 | break; |
| 1271 | /* fallthrough, tiled memory */ |
| 1272 | case TTM_PL_VRAM: |
| 1273 | mem->bus.offset = mem->start << PAGE_SHIFT; |
| 1274 | mem->bus.base = pci_resource_start(dev->pdev, 1); |
| 1275 | mem->bus.is_iomem = true; |
| 1276 | if (nv_device(drm->device)->card_type >= NV_50) { |
| 1277 | struct nouveau_bar *bar = nouveau_bar(drm->device); |
| 1278 | |
| 1279 | ret = bar->umap(bar, node, NV_MEM_ACCESS_RW, |
| 1280 | &node->bar_vma); |
| 1281 | if (ret) |
| 1282 | return ret; |
| 1283 | |
| 1284 | mem->bus.offset = node->bar_vma.offset; |
| 1285 | } |
| 1286 | break; |
| 1287 | default: |
| 1288 | return -EINVAL; |
| 1289 | } |
| 1290 | return 0; |
| 1291 | } |
| 1292 | |
| 1293 | static void |
| 1294 | nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) |
| 1295 | { |
| 1296 | struct nouveau_drm *drm = nouveau_bdev(bdev); |
| 1297 | struct nouveau_bar *bar = nouveau_bar(drm->device); |
| 1298 | struct nouveau_mem *node = mem->mm_node; |
| 1299 | |
| 1300 | if (!node->bar_vma.node) |
| 1301 | return; |
| 1302 | |
| 1303 | bar->unmap(bar, &node->bar_vma); |
| 1304 | } |
| 1305 | |
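/*
 * CPU fault notifier: make sure the buffer is somewhere user space can
 * actually map. Tiled NV50+ buffers sitting in system memory are pushed
 * into GART, and on pre-NV50 chips VRAM buffers beyond the BAR1 window
 * are migrated into its mappable portion.
 */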
| 1306 | static int |
| 1307 | nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) |
| 1308 | { |
| 1309 | struct nouveau_drm *drm = nouveau_bdev(bo->bdev); |
| 1310 | struct nouveau_bo *nvbo = nouveau_bo(bo); |
| 1311 | struct nouveau_device *device = nv_device(drm->device); |
| 1312 | u32 mappable = pci_resource_len(device->pdev, 1) >> PAGE_SHIFT; |
| 1313 | int ret; |
| 1314 | |
| 1315 | /* as long as the bo isn't in vram, and isn't tiled, we've got |
| 1316 | * nothing to do here. |
| 1317 | */ |
| 1318 | if (bo->mem.mem_type != TTM_PL_VRAM) { |
| 1319 | if (nv_device(drm->device)->card_type < NV_50 || |
| 1320 | !nouveau_bo_tile_layout(nvbo)) |
| 1321 | return 0; |
| 1322 | |
| 1323 | if (bo->mem.mem_type == TTM_PL_SYSTEM) { |
| 1324 | nouveau_bo_placement_set(nvbo, TTM_PL_TT, 0); |
| 1325 | |
| 1326 | ret = nouveau_bo_validate(nvbo, false, false); |
| 1327 | if (ret) |
| 1328 | return ret; |
| 1329 | } |
| 1330 | return 0; |
| 1331 | } |
| 1332 | |
| 1333 | /* make sure bo is in mappable vram */ |
| 1334 | if (nv_device(drm->device)->card_type >= NV_50 || |
| 1335 | bo->mem.start + bo->mem.num_pages < mappable) |
| 1336 | return 0; |
| 1337 | |
| 1338 | |
| 1339 | nvbo->placement.fpfn = 0; |
| 1340 | nvbo->placement.lpfn = mappable; |
| 1341 | nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0); |
| 1342 | return nouveau_bo_validate(nvbo, false, false); |
| 1343 | } |
| 1344 | |
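/*
 * Back a ttm_tt with pages and DMA addresses. Imported (SG) buffers
 * just mirror their sg table, AGP and swiotlb configurations use their
 * own population paths, and the default path allocates from the TTM
 * pool and pci_map_page()s every page, unwinding the mappings on
 * failure.
 */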
| 1345 | static int |
| 1346 | nouveau_ttm_tt_populate(struct ttm_tt *ttm) |
| 1347 | { |
| 1348 | struct ttm_dma_tt *ttm_dma = (void *)ttm; |
| 1349 | struct nouveau_drm *drm; |
| 1350 | struct drm_device *dev; |
| 1351 | unsigned i; |
| 1352 | int r; |
| 1353 | bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); |
| 1354 | |
| 1355 | if (ttm->state != tt_unpopulated) |
| 1356 | return 0; |
| 1357 | |
| 1358 | if (slave && ttm->sg) { |
| 1359 | /* make userspace faulting work */ |
| 1360 | drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, |
| 1361 | ttm_dma->dma_address, ttm->num_pages); |
| 1362 | ttm->state = tt_unbound; |
| 1363 | return 0; |
| 1364 | } |
| 1365 | |
| 1366 | drm = nouveau_bdev(ttm->bdev); |
| 1367 | dev = drm->dev; |
| 1368 | |
| 1369 | #if __OS_HAS_AGP |
| 1370 | if (drm->agp.stat == ENABLED) { |
| 1371 | return ttm_agp_tt_populate(ttm); |
| 1372 | } |
| 1373 | #endif |
| 1374 | |
| 1375 | #ifdef CONFIG_SWIOTLB |
| 1376 | if (swiotlb_nr_tbl()) { |
| 1377 | return ttm_dma_populate((void *)ttm, dev->dev); |
| 1378 | } |
| 1379 | #endif |
| 1380 | |
| 1381 | r = ttm_pool_populate(ttm); |
| 1382 | if (r) { |
| 1383 | return r; |
| 1384 | } |
| 1385 | |
| 1386 | for (i = 0; i < ttm->num_pages; i++) { |
| 1387 | ttm_dma->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i], |
| 1388 | 0, PAGE_SIZE, |
| 1389 | PCI_DMA_BIDIRECTIONAL); |
| 1390 | if (pci_dma_mapping_error(dev->pdev, ttm_dma->dma_address[i])) { |
| 1391 | while (i--) { |
| 1392 | pci_unmap_page(dev->pdev, ttm_dma->dma_address[i], |
| 1393 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); |
| 1394 | ttm_dma->dma_address[i] = 0; |
| 1395 | } |
| 1396 | ttm_pool_unpopulate(ttm); |
| 1397 | return -EFAULT; |
| 1398 | } |
| 1399 | } |
| 1400 | return 0; |
| 1401 | } |
| 1402 | |
| 1403 | static void |
| 1404 | nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm) |
| 1405 | { |
| 1406 | struct ttm_dma_tt *ttm_dma = (void *)ttm; |
| 1407 | struct nouveau_drm *drm; |
| 1408 | struct drm_device *dev; |
| 1409 | unsigned i; |
| 1410 | bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); |
| 1411 | |
| 1412 | if (slave) |
| 1413 | return; |
| 1414 | |
| 1415 | drm = nouveau_bdev(ttm->bdev); |
| 1416 | dev = drm->dev; |
| 1417 | |
| 1418 | #if __OS_HAS_AGP |
| 1419 | if (drm->agp.stat == ENABLED) { |
| 1420 | ttm_agp_tt_unpopulate(ttm); |
| 1421 | return; |
| 1422 | } |
| 1423 | #endif |
| 1424 | |
| 1425 | #ifdef CONFIG_SWIOTLB |
| 1426 | if (swiotlb_nr_tbl()) { |
| 1427 | ttm_dma_unpopulate((void *)ttm, dev->dev); |
| 1428 | return; |
| 1429 | } |
| 1430 | #endif |
| 1431 | |
| 1432 | for (i = 0; i < ttm->num_pages; i++) { |
| 1433 | if (ttm_dma->dma_address[i]) { |
| 1434 | pci_unmap_page(dev->pdev, ttm_dma->dma_address[i], |
| 1435 | PAGE_SIZE, PCI_DMA_BIDIRECTIONAL); |
| 1436 | } |
| 1437 | } |
| 1438 | |
| 1439 | ttm_pool_unpopulate(ttm); |
| 1440 | } |
| 1441 | |
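/*
 * Attach @fence as the bo's sync object, dropping the reference held on
 * whatever fence was attached before. The nouveau_bo_fence_* helpers
 * below adapt nouveau fences to TTM's sync_obj interface.
 */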
| 1442 | void |
| 1443 | nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence) |
| 1444 | { |
| 1445 | struct nouveau_fence *new_fence = nouveau_fence_ref(fence); |
| 1446 | struct nouveau_fence *old_fence = NULL; |
| 1447 | |
| 1448 | spin_lock(&nvbo->bo.bdev->fence_lock); |
| 1449 | old_fence = nvbo->bo.sync_obj; |
| 1450 | nvbo->bo.sync_obj = new_fence; |
| 1451 | spin_unlock(&nvbo->bo.bdev->fence_lock); |
| 1452 | |
| 1453 | nouveau_fence_unref(&old_fence); |
| 1454 | } |
| 1455 | |
| 1456 | static void |
| 1457 | nouveau_bo_fence_unref(void **sync_obj) |
| 1458 | { |
| 1459 | nouveau_fence_unref((struct nouveau_fence **)sync_obj); |
| 1460 | } |
| 1461 | |
| 1462 | static void * |
| 1463 | nouveau_bo_fence_ref(void *sync_obj) |
| 1464 | { |
| 1465 | return nouveau_fence_ref(sync_obj); |
| 1466 | } |
| 1467 | |
| 1468 | static bool |
| 1469 | nouveau_bo_fence_signalled(void *sync_obj) |
| 1470 | { |
| 1471 | return nouveau_fence_done(sync_obj); |
| 1472 | } |
| 1473 | |
| 1474 | static int |
| 1475 | nouveau_bo_fence_wait(void *sync_obj, bool lazy, bool intr) |
| 1476 | { |
| 1477 | return nouveau_fence_wait(sync_obj, lazy, intr); |
| 1478 | } |
| 1479 | |
| 1480 | static int |
| 1481 | nouveau_bo_fence_flush(void *sync_obj) |
| 1482 | { |
| 1483 | return 0; |
| 1484 | } |
| 1485 | |
| 1486 | struct ttm_bo_driver nouveau_bo_driver = { |
| 1487 | .ttm_tt_create = &nouveau_ttm_tt_create, |
| 1488 | .ttm_tt_populate = &nouveau_ttm_tt_populate, |
| 1489 | .ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate, |
| 1490 | .invalidate_caches = nouveau_bo_invalidate_caches, |
| 1491 | .init_mem_type = nouveau_bo_init_mem_type, |
| 1492 | .evict_flags = nouveau_bo_evict_flags, |
| 1493 | .move_notify = nouveau_bo_move_ntfy, |
| 1494 | .move = nouveau_bo_move, |
| 1495 | .verify_access = nouveau_bo_verify_access, |
| 1496 | .sync_obj_signaled = nouveau_bo_fence_signalled, |
| 1497 | .sync_obj_wait = nouveau_bo_fence_wait, |
| 1498 | .sync_obj_flush = nouveau_bo_fence_flush, |
| 1499 | .sync_obj_unref = nouveau_bo_fence_unref, |
| 1500 | .sync_obj_ref = nouveau_bo_fence_ref, |
| 1501 | .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify, |
| 1502 | .io_mem_reserve = &nouveau_ttm_io_mem_reserve, |
| 1503 | .io_mem_free = &nouveau_ttm_io_mem_free, |
| 1504 | }; |
| 1505 | |
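/*
 * Per-VM mapping helpers: find, add or delete the nouveau_vma that maps
 * this bo into a given client VM. nouveau_bo_vma_add() maps the backing
 * store right away when it is already somewhere the VM can address.
 */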
| 1506 | struct nouveau_vma * |
| 1507 | nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nouveau_vm *vm) |
| 1508 | { |
| 1509 | struct nouveau_vma *vma; |
| 1510 | list_for_each_entry(vma, &nvbo->vma_list, head) { |
| 1511 | if (vma->vm == vm) |
| 1512 | return vma; |
| 1513 | } |
| 1514 | |
| 1515 | return NULL; |
| 1516 | } |
| 1517 | |
| 1518 | int |
| 1519 | nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm, |
| 1520 | struct nouveau_vma *vma) |
| 1521 | { |
| 1522 | const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT; |
| 1523 | int ret; |
| 1524 | |
| 1525 | ret = nouveau_vm_get(vm, size, nvbo->page_shift, |
| 1526 | NV_MEM_ACCESS_RW, vma); |
| 1527 | if (ret) |
| 1528 | return ret; |
| 1529 | |
| 1530 | if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM && |
| 1531 | (nvbo->bo.mem.mem_type == TTM_PL_VRAM || |
| 1532 | nvbo->page_shift != vma->vm->vmm->lpg_shift)) |
| 1533 | nouveau_vm_map(vma, nvbo->bo.mem.mm_node); |
| 1534 | |
| 1535 | list_add_tail(&vma->head, &nvbo->vma_list); |
| 1536 | vma->refcount = 1; |
| 1537 | return 0; |
| 1538 | } |
| 1539 | |
| 1540 | void |
| 1541 | nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma) |
| 1542 | { |
| 1543 | if (vma->node) { |
| 1544 | if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM) |
| 1545 | nouveau_vm_unmap(vma); |
| 1546 | nouveau_vm_put(vma); |
| 1547 | list_del(&vma->head); |
| 1548 | } |
| 1549 | } |