/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs   <darktama@iinet.net.au>
 *	    Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include "drmP.h"

#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_mm.h"
#include "nouveau_vm.h"

#include <linux/log2.h>
#include <linux/slab.h>

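/*
 * TTM destroy callback: drops the tile region and VM mapping held by the
 * buffer, then frees the nouveau_bo itself.  A bo still attached to a GEM
 * object at this point indicates a refcounting bug somewhere above us.
 */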
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (unlikely(nvbo->gem))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);

	nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
	nouveau_vm_put(&nvbo->vma);
	kfree(nvbo);
}

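/*
 * Tiled buffers on pre-NV50 chipsets carry chipset-specific constraints:
 * the base alignment grows with the chipset generation, and the size is
 * padded to a multiple of 64 (32 on NV1x) times tile_mode.  NV50 simply
 * rounds both size and alignment up to 64KiB.
 */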
static void
nouveau_bo_fixup_align(struct drm_device *dev,
		       uint32_t tile_mode, uint32_t tile_flags,
		       int *align, int *size)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (dev_priv->card_type < NV_50) {
		if (tile_mode) {
			if (dev_priv->chipset >= 0x40) {
				*align = 65536;
				*size = roundup(*size, 64 * tile_mode);

			} else if (dev_priv->chipset >= 0x30) {
				*align = 32768;
				*size = roundup(*size, 64 * tile_mode);

			} else if (dev_priv->chipset >= 0x20) {
				*align = 16384;
				*size = roundup(*size, 64 * tile_mode);

			} else if (dev_priv->chipset >= 0x10) {
				*align = 16384;
				*size = roundup(*size, 32 * tile_mode);
			}
		}
	}

	/* ALIGN works only on powers of two. */
	*size = roundup(*size, PAGE_SIZE);
	if (dev_priv->card_type == NV_50) {
		*size = roundup(*size, 65536);
		*align = max(65536, *align);
	}
}

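/*
 * Allocate and initialise a buffer object.  Alignment and size are fixed
 * up for tiling first; unless no_vm is set, address space in the channel
 * VM is reserved before the object is handed to TTM.  Note that on
 * ttm_bo_init() failure TTM has already called nouveau_bo_del_ttm(), so
 * the error path must not free nvbo a second time.
 */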
int
nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
	       int size, int align, uint32_t flags, uint32_t tile_mode,
	       uint32_t tile_flags, bool no_vm, bool mappable,
	       struct nouveau_bo **pnvbo)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *nvbo;
	int ret = 0;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	nvbo->mappable = mappable;
	nvbo->no_vm = no_vm;
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;
	nvbo->bo.bdev = &dev_priv->ttm.bdev;

	nouveau_bo_fixup_align(dev, tile_mode, nouveau_bo_tile_layout(nvbo),
			       &align, &size);
	align >>= PAGE_SHIFT;

	if (!nvbo->no_vm && dev_priv->chan_vm) {
		ret = nouveau_vm_get(dev_priv->chan_vm, size, 16,
				     NV_MEM_ACCESS_RW, &nvbo->vma);
		if (ret) {
			kfree(nvbo);
			return ret;
		}
	}

	nouveau_bo_placement_set(nvbo, flags, 0);

	nvbo->channel = chan;
	ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
			  ttm_bo_type_device, &nvbo->placement, align, 0,
			  false, NULL, size, nouveau_bo_del_ttm);
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}
	nvbo->channel = NULL;

	if (nvbo->vma.node) {
		if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
			nvbo->bo.offset = nvbo->vma.offset;
	}

	*pnvbo = nvbo;
	return 0;
}

static void
set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
{
	*n = 0;

	if (type & TTM_PL_FLAG_VRAM)
		pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
	if (type & TTM_PL_FLAG_TT)
		pl[(*n)++] = TTM_PL_FLAG_TT | flags;
	if (type & TTM_PL_FLAG_SYSTEM)
		pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
}

static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);

	if (dev_priv->card_type == NV_10 &&
	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM)) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units. Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 * at the same time.
		 */
		int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;

		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
			nvbo->placement.fpfn = vram_pages / 2;
			nvbo->placement.lpfn = ~0;
		} else {
			nvbo->placement.fpfn = 0;
			nvbo->placement.lpfn = vram_pages / 2;
		}
	}
}

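/*
 * Build the TTM placement lists for a bo from a mask of TTM_PL_FLAG_*
 * memory types.  "busy" contributes additional fallback placements that
 * are only tried when the preferred ones are contended, and a pinned bo
 * gets TTM_PL_FLAG_NO_EVICT so TTM will leave it where it is.
 */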
void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = TTM_PL_MASK_CACHING |
		(nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement,
			   type, flags);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   type | busy, flags);

	set_placement_range(nvbo, type);
}

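/*
 * Pinning is reference counted: only the first pin validates the bo into
 * the requested memory type, and only the matching final unpin makes it
 * evictable again.  Pinning an already-pinned bo into a different memory
 * type is refused.  The aperture-free accounting below mirrors the
 * validate result.
 */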
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
		NV_ERROR(nouveau_bdev(bo->bdev)->dev,
			 "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
			 1 << bo->mem.mem_type, memtype);
		return -EINVAL;
	}

	if (nvbo->pin_refcnt++)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		goto out;

	nouveau_bo_placement_set(nvbo, memtype, 0);

	ret = nouveau_bo_validate(nvbo, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free -= bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free -= bo->mem.size;
			break;
		default:
			break;
		}
	}
	ttm_bo_unreserve(bo);
out:
	if (unlikely(ret))
		nvbo->pin_refcnt--;
	return ret;
}

int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (--nvbo->pin_refcnt)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		return ret;

	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = nouveau_bo_validate(nvbo, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free += bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free += bo->mem.size;
			break;
		default:
			break;
		}
	}

	ttm_bo_unreserve(bo);
	return ret;
}

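/*
 * CPU access helpers: nouveau_bo_map() kmaps the entire object through
 * TTM, nouveau_bo_unmap() tears the mapping down again.
 */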
int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	if (nvbo)
		ttm_bo_kunmap(&nvbo->kmap);
}

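/*
 * Validate the bo against its current placement lists, then refresh the
 * GPU offset from the VM mapping whenever the bo has ended up in VRAM.
 */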
int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
		    bool no_wait_reserve, bool no_wait_gpu)
{
	int ret;

	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible,
			      no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	if (nvbo->vma.node) {
		if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
			nvbo->bo.offset = nvbo->vma.offset;
	}

	return 0;
}

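/*
 * 16- and 32-bit accessors for a kmapped bo.  ttm_bo_kmap() may hand back
 * either ordinary kernel memory or I/O memory, so accesses must go
 * through the io* helpers in the latter case.
 */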
u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread16_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

static struct ttm_backend *
nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (dev_priv->gart_info.type) {
#if __OS_HAS_AGP
	case NOUVEAU_GART_AGP:
		return ttm_agp_backend_init(bdev, dev->agp->bridge);
#endif
	case NOUVEAU_GART_SGDMA:
		return nouveau_sgdma_init_ttm(dev);
	default:
		NV_ERROR(dev, "Unknown GART type %d\n",
			 dev_priv->gart_info.type);
		break;
	}

	return NULL;
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}

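/*
 * Describe each TTM memory type to the core: which manager backs it,
 * which caching modes it supports, and its base offset in the GPU
 * address space.  NV50 VRAM is backed by nouveau's own manager so that
 * allocations can be mapped through BAR1 on demand rather than through a
 * fixed linear aperture.
 */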
static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		if (dev_priv->card_type == NV_50) {
			man->func = &nouveau_vram_manager;
			man->io_reserve_fastpath = false;
			man->use_io_reserve_lru = true;
		} else {
			man->func = &ttm_bo_manager_func;
		}
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		man->gpu_offset = 0;
		break;
	case TTM_PL_TT:
		man->func = &ttm_bo_manager_func;
		switch (dev_priv->gart_info.type) {
		case NOUVEAU_GART_AGP:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
			break;
		case NOUVEAU_GART_SGDMA:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
			break;
		default:
			NV_ERROR(dev, "Unknown GART type: %d\n",
				 dev_priv->gart_info.type);
			return -EINVAL;
		}
		man->gpu_offset = dev_priv->vm_gart_base;
		break;
	default:
		NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
					 TTM_PL_FLAG_SYSTEM);
		break;
	default:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
		break;
	}

	*pl = nvbo->placement;
}

/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */

static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
			      struct nouveau_bo *nvbo, bool evict,
			      bool no_wait_reserve, bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct nouveau_fence *fence = NULL;
	int ret;

	ret = nouveau_fence_new(chan, &fence, true);
	if (ret)
		return ret;

	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
					no_wait_reserve, no_wait_gpu, new_mem);
	nouveau_fence_unref(&fence);
	return ret;
}

static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (nvbo->no_vm) {
		if (mem->mem_type == TTM_PL_TT)
			return NvDmaGART;
		return NvDmaVRAM;
	}

	if (mem->mem_type == TTM_PL_TT)
		return chan->gart_handle;
	return chan->vram_handle;
}

512 | ||
513 | static int | |
f1ab0cc9 BS |
514 | nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, |
515 | struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) | |
6ee73861 | 516 | { |
6ee73861 | 517 | struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); |
f1ab0cc9 BS |
518 | struct nouveau_bo *nvbo = nouveau_bo(bo); |
519 | u64 length = (new_mem->num_pages << PAGE_SHIFT); | |
520 | u64 src_offset, dst_offset; | |
6ee73861 BS |
521 | int ret; |
522 | ||
d961db75 BS |
523 | src_offset = old_mem->start << PAGE_SHIFT; |
524 | dst_offset = new_mem->start << PAGE_SHIFT; | |
f1ab0cc9 BS |
525 | if (!nvbo->no_vm) { |
526 | if (old_mem->mem_type == TTM_PL_VRAM) | |
4c136142 | 527 | src_offset = nvbo->vma.offset; |
6ee73861 | 528 | else |
f1ab0cc9 BS |
529 | src_offset += dev_priv->vm_gart_base; |
530 | ||
531 | if (new_mem->mem_type == TTM_PL_VRAM) | |
4c136142 | 532 | dst_offset = nvbo->vma.offset; |
f1ab0cc9 BS |
533 | else |
534 | dst_offset += dev_priv->vm_gart_base; | |
6ee73861 BS |
535 | } |
536 | ||
537 | ret = RING_SPACE(chan, 3); | |
538 | if (ret) | |
539 | return ret; | |
6ee73861 | 540 | |
f1ab0cc9 BS |
541 | BEGIN_RING(chan, NvSubM2MF, 0x0184, 2); |
542 | OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem)); | |
543 | OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem)); | |
544 | ||
545 | while (length) { | |
546 | u32 amount, stride, height; | |
547 | ||
5220b3c1 BS |
548 | amount = min(length, (u64)(4 * 1024 * 1024)); |
549 | stride = 16 * 4; | |
f1ab0cc9 BS |
550 | height = amount / stride; |
551 | ||
f13b3263 FJ |
552 | if (new_mem->mem_type == TTM_PL_VRAM && |
553 | nouveau_bo_tile_layout(nvbo)) { | |
f1ab0cc9 BS |
554 | ret = RING_SPACE(chan, 8); |
555 | if (ret) | |
556 | return ret; | |
557 | ||
558 | BEGIN_RING(chan, NvSubM2MF, 0x0200, 7); | |
559 | OUT_RING (chan, 0); | |
5220b3c1 | 560 | OUT_RING (chan, 0); |
f1ab0cc9 BS |
561 | OUT_RING (chan, stride); |
562 | OUT_RING (chan, height); | |
563 | OUT_RING (chan, 1); | |
564 | OUT_RING (chan, 0); | |
565 | OUT_RING (chan, 0); | |
566 | } else { | |
567 | ret = RING_SPACE(chan, 2); | |
568 | if (ret) | |
569 | return ret; | |
570 | ||
571 | BEGIN_RING(chan, NvSubM2MF, 0x0200, 1); | |
572 | OUT_RING (chan, 1); | |
573 | } | |
f13b3263 FJ |
574 | if (old_mem->mem_type == TTM_PL_VRAM && |
575 | nouveau_bo_tile_layout(nvbo)) { | |
f1ab0cc9 BS |
576 | ret = RING_SPACE(chan, 8); |
577 | if (ret) | |
578 | return ret; | |
579 | ||
580 | BEGIN_RING(chan, NvSubM2MF, 0x021c, 7); | |
581 | OUT_RING (chan, 0); | |
5220b3c1 | 582 | OUT_RING (chan, 0); |
f1ab0cc9 BS |
583 | OUT_RING (chan, stride); |
584 | OUT_RING (chan, height); | |
585 | OUT_RING (chan, 1); | |
586 | OUT_RING (chan, 0); | |
587 | OUT_RING (chan, 0); | |
588 | } else { | |
589 | ret = RING_SPACE(chan, 2); | |
590 | if (ret) | |
591 | return ret; | |
592 | ||
593 | BEGIN_RING(chan, NvSubM2MF, 0x021c, 1); | |
594 | OUT_RING (chan, 1); | |
595 | } | |
596 | ||
597 | ret = RING_SPACE(chan, 14); | |
6ee73861 BS |
598 | if (ret) |
599 | return ret; | |
f1ab0cc9 BS |
600 | |
601 | BEGIN_RING(chan, NvSubM2MF, 0x0238, 2); | |
602 | OUT_RING (chan, upper_32_bits(src_offset)); | |
603 | OUT_RING (chan, upper_32_bits(dst_offset)); | |
604 | BEGIN_RING(chan, NvSubM2MF, 0x030c, 8); | |
605 | OUT_RING (chan, lower_32_bits(src_offset)); | |
606 | OUT_RING (chan, lower_32_bits(dst_offset)); | |
607 | OUT_RING (chan, stride); | |
608 | OUT_RING (chan, stride); | |
609 | OUT_RING (chan, stride); | |
610 | OUT_RING (chan, height); | |
611 | OUT_RING (chan, 0x00000101); | |
612 | OUT_RING (chan, 0x00000000); | |
613 | BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1); | |
614 | OUT_RING (chan, 0); | |
615 | ||
616 | length -= amount; | |
617 | src_offset += amount; | |
618 | dst_offset += amount; | |
6ee73861 BS |
619 | } |
620 | ||
f1ab0cc9 BS |
621 | return 0; |
622 | } | |
623 | ||
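/*
 * Pre-NV50 M2MF copy: a page-at-a-time blit, issued in batches of at
 * most 2047 lines (one page per line), so larger buffers take several
 * M2MF operations.
 */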
static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	u32 src_offset = old_mem->start << PAGE_SHIFT;
	u32 dst_offset = new_mem->start << PAGE_SHIFT;
	u32 page_count = new_mem->num_pages;
	int ret;

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;

	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_RING(chan, NvSubM2MF,
				 NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING  (chan, src_offset);
		OUT_RING  (chan, dst_offset);
		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* line_length */
		OUT_RING  (chan, line_count);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

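/*
 * Dispatch a GPU copy on the bo's own channel when it has one, otherwise
 * fall back to the kernel channel, which must be locked around the
 * transfer.  The fence created by nouveau_bo_move_accel_cleanup() lets
 * TTM wait for the copy before reusing the old backing store.
 */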
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     bool no_wait_reserve, bool no_wait_gpu,
		     struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_channel *chan;
	int ret;

	chan = nvbo->channel;
	if (!chan || nvbo->no_vm) {
		chan = dev_priv->channel;
		mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
	}

	if (dev_priv->card_type < NV_50)
		ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	else
		ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	if (ret == 0) {
		ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
						    no_wait_reserve,
						    no_wait_gpu, new_mem);
	}

	if (chan == dev_priv->channel)
		mutex_unlock(&chan->mutex);
	return ret;
}

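/*
 * Moves between VRAM and system memory are bounced through a temporary
 * GART (TT) placement, since the copy engine only reaches TTM_PL_VRAM
 * and TTM_PL_TT (see above): "flipd" copies VRAM->TT and then flips the
 * bo out to system memory, while "flips" flips system pages into the
 * GART first and copies TT->VRAM afterwards.
 */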
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);

out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

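/*
 * Per-move VM/tiling bookkeeping.  For a bo entering VRAM this either
 * points the channel VM at the new backing storage or, on pre-VM
 * chipsets, allocates a tile region matching the bo's tiling state; the
 * old tile region is released once the move has been fenced.
 */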
static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
		   struct nouveau_tile_reg **new_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	uint64_t offset;

	if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
		/* Nothing to do. */
		*new_tile = NULL;
		return 0;
	}

	offset = new_mem->start << PAGE_SHIFT;

	if (dev_priv->chan_vm) {
		nouveau_vm_map(&nvbo->vma, new_mem->mm_node);
	} else if (dev_priv->card_type >= NV_10) {
		*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
						nvbo->tile_mode,
						nvbo->tile_flags);
	}

	return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_tile_reg *new_tile,
		      struct nouveau_tile_reg **old_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;

	if (dev_priv->card_type >= NV_10 &&
	    dev_priv->card_type < NV_50) {
		nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
		*old_tile = new_tile;
	}
}

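/*
 * Main TTM move callback.  Cheap cases (no backing pages yet, or no
 * channel up and running) bypass the GPU entirely; everything else tries
 * a hardware M2MF copy and keeps ttm_bo_move_memcpy() as the fallback of
 * last resort.
 */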
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait_reserve, bool no_wait_gpu,
		struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_tile_reg *new_tile = NULL;
	int ret = 0;

	ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
	if (ret)
		return ret;

	/* Fake bo copy. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_mem;
		new_mem->mm_node = NULL;
		goto out;
	}

	/* Software copy if the card isn't up and running yet. */
	if (!dev_priv->channel) {
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
		goto out;
	}

	/* Hardware assisted copy. */
	if (new_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	else if (old_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	else
		ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);

	if (!ret)
		goto out;

	/* Fallback to software copy. */
	ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);

out:
	if (ret)
		nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
	else
		nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);

	return ret;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}

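/*
 * Tell TTM how a memory region is reached by the CPU.  AGP and VRAM on
 * boards without a BAR1 VM are plain linear apertures; with a BAR1 VM, a
 * VRAM node must first be mapped into BAR1 before its bus offset is
 * known.
 */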
static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;
	int ret;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		return 0;
	case TTM_PL_TT:
#if __OS_HAS_AGP
		if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = dev_priv->gart_info.aper_base;
			mem->bus.is_iomem = true;
		}
#endif
		break;
	case TTM_PL_VRAM:
	{
		struct nouveau_vram *vram = mem->mm_node;

		if (!dev_priv->bar1_vm) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = pci_resource_start(dev->pdev, 1);
			mem->bus.is_iomem = true;
			break;
		}

		ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size, 12,
				     NV_MEM_ACCESS_RW, &vram->bar_vma);
		if (ret)
			return ret;

		nouveau_vm_map(&vram->bar_vma, vram);

		mem->bus.offset = vram->bar_vma.offset;
		mem->bus.offset -= 0x0020000000ULL;
		mem->bus.base = pci_resource_start(dev->pdev, 1);
		mem->bus.is_iomem = true;
	}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct nouveau_vram *vram = mem->mm_node;

	if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM)
		return;

	if (!vram->bar_vma.node)
		return;

	nouveau_vm_unmap(&vram->bar_vma);
	nouveau_vm_put(&vram->bar_vma);
}

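/*
 * Called before a CPU fault is serviced.  VRAM beyond the mappable
 * aperture cannot be reached by the CPU, so a bo sitting outside that
 * range is restricted to mappable VRAM and revalidated before the fault
 * proceeds.
 */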
static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->mem.mem_type != TTM_PL_VRAM) {
		if (dev_priv->card_type < NV_50 ||
		    !nouveau_bo_tile_layout(nvbo))
			return 0;
	}

	/* make sure bo is in mappable vram */
	if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages)
		return 0;

	nvbo->placement.fpfn = 0;
	nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
	nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0);
	return nouveau_bo_validate(nvbo, false, true, false);
}

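/*
 * Attach a new fence as the bo's sync object, taking a reference on it
 * before dropping the reference on the fence it replaces; the swap is
 * done under the bdev fence_lock.
 */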
void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
{
	struct nouveau_fence *old_fence;

	if (likely(fence))
		nouveau_fence_ref(fence);

	spin_lock(&nvbo->bo.bdev->fence_lock);
	old_fence = nvbo->bo.sync_obj;
	nvbo->bo.sync_obj = fence;
	spin_unlock(&nvbo->bo.bdev->fence_lock);

	nouveau_fence_unref(&old_fence);
}

struct ttm_bo_driver nouveau_bo_driver = {
	.create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.sync_obj_signaled = __nouveau_fence_signalled,
	.sync_obj_wait = __nouveau_fence_wait,
	.sync_obj_flush = __nouveau_fence_flush,
	.sync_obj_unref = __nouveau_fence_unref,
	.sync_obj_ref = __nouveau_fence_ref,
	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
};