/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs   <darktama@iinet.net.au>
 *	    Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include "drmP.h"
#include "ttm/ttm_page_alloc.h"

#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_mm.h"
#include "nouveau_vm.h"

#include <linux/log2.h>
#include <linux/slab.h>

static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (unlikely(nvbo->gem))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);

	nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
	kfree(nvbo);
}

static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
		       int *align, int *size)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);

	if (dev_priv->card_type < NV_50) {
		if (nvbo->tile_mode) {
			if (dev_priv->chipset >= 0x40) {
				*align = 65536;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (dev_priv->chipset >= 0x30) {
				*align = 32768;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (dev_priv->chipset >= 0x20) {
				*align = 16384;
				*size = roundup(*size, 64 * nvbo->tile_mode);

			} else if (dev_priv->chipset >= 0x10) {
				*align = 16384;
				*size = roundup(*size, 32 * nvbo->tile_mode);
			}
		}
	} else {
		*size = roundup(*size, (1 << nvbo->page_shift));
		*align = max((1 << nvbo->page_shift), *align);
	}

	*size = roundup(*size, PAGE_SIZE);
}

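/*
 * Worked example (illustrative only): on an NV40-class chip with
 * tile_mode 4, the code above forces *align to 65536 and rounds *size
 * up to a multiple of 64 * 4 = 256 bytes; the final roundup() then
 * pads *size out to a whole page.
 */
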
int
nouveau_bo_new(struct drm_device *dev, int size, int align,
	       uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
	       struct nouveau_bo **pnvbo)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *nvbo;
	size_t acc_size;
	int ret;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	INIT_LIST_HEAD(&nvbo->vma_list);
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;
	nvbo->bo.bdev = &dev_priv->ttm.bdev;

	nvbo->page_shift = 12;
	if (dev_priv->bar1_vm) {
		if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
			nvbo->page_shift = dev_priv->bar1_vm->lpg_shift;
	}

	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
	nouveau_bo_placement_set(nvbo, flags, 0);

	acc_size = ttm_bo_dma_acc_size(&dev_priv->ttm.bdev, size,
				       sizeof(struct nouveau_bo));

	ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
			  ttm_bo_type_device, &nvbo->placement,
			  align >> PAGE_SHIFT, 0, false, NULL, acc_size,
			  nouveau_bo_del_ttm);
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}

	*pnvbo = nvbo;
	return 0;
}

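/*
 * Example usage (a hypothetical sketch, not called anywhere in the
 * driver): allocate a page-sized buffer in VRAM, pin it there and map
 * it into kernel space.  Error unwinding is abbreviated.
 *
 *	struct nouveau_bo *nvbo = NULL;
 *	int ret;
 *
 *	ret = nouveau_bo_new(dev, PAGE_SIZE, 0, TTM_PL_FLAG_VRAM,
 *			     0, 0, &nvbo);
 *	if (ret == 0)
 *		ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
 *	if (ret == 0)
 *		ret = nouveau_bo_map(nvbo);
 */
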
static void
set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
{
	*n = 0;

	if (type & TTM_PL_FLAG_VRAM)
		pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
	if (type & TTM_PL_FLAG_TT)
		pl[(*n)++] = TTM_PL_FLAG_TT | flags;
	if (type & TTM_PL_FLAG_SYSTEM)
		pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
}

static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;

	if (dev_priv->card_type == NV_10 &&
	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
	    nvbo->bo.mem.num_pages < vram_pages / 4) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units. Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 * at the same time.
		 */
		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
			nvbo->placement.fpfn = vram_pages / 2;
			nvbo->placement.lpfn = ~0;
		} else {
			nvbo->placement.fpfn = 0;
			nvbo->placement.lpfn = vram_pages / 2;
		}
	}
}

void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = TTM_PL_MASK_CACHING |
		(nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement,
			   type, flags);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   type | busy, flags);

	set_placement_range(nvbo, type);
}

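/*
 * Example (hypothetical sketch): request VRAM placement but allow TTM
 * to fall back to GART when VRAM is contended.  The "busy" argument is
 * only OR'd into the busy-placement list, so the preferred list stays
 * VRAM-only.
 *
 *	nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, TTM_PL_FLAG_TT);
 */
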
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
		NV_ERROR(nouveau_bdev(bo->bdev)->dev,
			 "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
			 1 << bo->mem.mem_type, memtype);
		return -EINVAL;
	}

	if (nvbo->pin_refcnt++)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		goto out;

	nouveau_bo_placement_set(nvbo, memtype, 0);

	ret = nouveau_bo_validate(nvbo, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free -= bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free -= bo->mem.size;
			break;
		default:
			break;
		}
	}
	ttm_bo_unreserve(bo);
out:
	if (unlikely(ret))
		nvbo->pin_refcnt--;
	return ret;
}

int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (--nvbo->pin_refcnt)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		return ret;

	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = nouveau_bo_validate(nvbo, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free += bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free += bo->mem.size;
			break;
		default:
			break;
		}
	}

	ttm_bo_unreserve(bo);
	return ret;
}

int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	if (nvbo)
		ttm_bo_kunmap(&nvbo->kmap);
}

int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
		    bool no_wait_reserve, bool no_wait_gpu)
{
	int ret;

	ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible,
			      no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	return 0;
}

u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread16_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

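/*
 * Example (hypothetical sketch): after a successful nouveau_bo_map(),
 * the accessors above index the mapping in units of their access size,
 * so index 1 in nouveau_bo_wr32() touches byte offset 4.
 *
 *	u32 val;
 *
 *	nouveau_bo_wr32(nvbo, 0, 0xdeadbeef);
 *	val = nouveau_bo_rd32(nvbo, 0);
 *	nouveau_bo_unmap(nvbo);
 */
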
static struct ttm_tt *
nouveau_ttm_tt_create(struct ttm_bo_device *bdev,
		      unsigned long size, uint32_t page_flags,
		      struct page *dummy_read_page)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (dev_priv->gart_info.type) {
#if __OS_HAS_AGP
	case NOUVEAU_GART_AGP:
		return ttm_agp_tt_create(bdev, dev->agp->bridge,
					 size, page_flags, dummy_read_page);
#endif
	case NOUVEAU_GART_PDMA:
	case NOUVEAU_GART_HW:
		return nouveau_sgdma_create_ttm(bdev, size, page_flags,
						dummy_read_page);
	default:
		NV_ERROR(dev, "Unknown GART type %d\n",
			 dev_priv->gart_info.type);
		break;
	}

	return NULL;
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}

static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		if (dev_priv->card_type >= NV_50) {
			man->func = &nouveau_vram_manager;
			man->io_reserve_fastpath = false;
			man->use_io_reserve_lru = true;
		} else {
			man->func = &ttm_bo_manager_func;
		}
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	case TTM_PL_TT:
		if (dev_priv->card_type >= NV_50)
			man->func = &nouveau_gart_manager;
		else
			man->func = &ttm_bo_manager_func;
		switch (dev_priv->gart_info.type) {
		case NOUVEAU_GART_AGP:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
			break;
		case NOUVEAU_GART_PDMA:
		case NOUVEAU_GART_HW:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
			break;
		default:
			NV_ERROR(dev, "Unknown GART type: %d\n",
				 dev_priv->gart_info.type);
			return -EINVAL;
		}
		break;
	default:
		NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
					 TTM_PL_FLAG_SYSTEM);
		break;
	default:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
		break;
	}

	*pl = nvbo->placement;
}


/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */

static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
			      struct nouveau_bo *nvbo, bool evict,
			      bool no_wait_reserve, bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct nouveau_fence *fence = NULL;
	int ret;

	ret = nouveau_fence_new(chan, &fence, true);
	if (ret)
		return ret;

	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
					no_wait_reserve, no_wait_gpu, new_mem);
	nouveau_fence_unref(&fence);
	return ret;
}

static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 12);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0238, 2);
		OUT_RING  (chan, upper_32_bits(dst_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x030c, 6);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* line_length */
		OUT_RING  (chan, line_count);
		BEGIN_NVC0(chan, 2, NvSubM2MF, 0x0300, 1);
		OUT_RING  (chan, 0x00100110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

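/*
 * The loop above caps line_count at 2047 lines per submission, each
 * line being one page.  Illustrative arithmetic: a 16 MiB buffer
 * (4096 pages) therefore needs ceil(4096 / 2047) = 3 ring submissions
 * of 12 dwords each.
 */
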
static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 length = (new_mem->num_pages << PAGE_SHIFT);
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	int ret;

	while (length) {
		u32 amount, stride, height;

		amount = min(length, (u64)(4 * 1024 * 1024));
		stride = 16 * 4;
		height = amount / stride;

		if (new_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x0200, 7);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, stride);
			OUT_RING  (chan, height);
			OUT_RING  (chan, 1);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
			OUT_RING  (chan, 1);
		}
		if (old_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x021c, 7);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, stride);
			OUT_RING  (chan, height);
			OUT_RING  (chan, 1);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
			OUT_RING  (chan, 1);
		}

		ret = RING_SPACE(chan, 14);
		if (ret)
			return ret;

		BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		BEGIN_RING(chan, NvSubM2MF, 0x030c, 8);
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, stride);
		OUT_RING  (chan, stride);
		OUT_RING  (chan, stride);
		OUT_RING  (chan, height);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		length -= amount;
		src_offset += amount;
		dst_offset += amount;
	}

	return 0;
}

static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
	if (mem->mem_type == TTM_PL_TT)
		return chan->gart_handle;
	return chan->vram_handle;
}

static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	u32 src_offset = old_mem->start << PAGE_SHIFT;
	u32 dst_offset = new_mem->start << PAGE_SHIFT;
	u32 page_count = new_mem->num_pages;
	int ret;

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;

	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_RING(chan, NvSubM2MF,
				 NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING  (chan, src_offset);
		OUT_RING  (chan, dst_offset);
		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* line_length */
		OUT_RING  (chan, line_count);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
		   struct ttm_mem_reg *mem, struct nouveau_vma *vma)
{
	struct nouveau_mem *node = mem->mm_node;
	int ret;

	ret = nouveau_vm_get(chan->vm, mem->num_pages << PAGE_SHIFT,
			     node->page_shift, NV_MEM_ACCESS_RO, vma);
	if (ret)
		return ret;

	if (mem->mem_type == TTM_PL_VRAM)
		nouveau_vm_map(vma, node);
	else
		nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT,
				  node, node->pages);

	return 0;
}

static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     bool no_wait_reserve, bool no_wait_gpu,
		     struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_channel *chan;
	int ret;

	chan = nvbo->channel;
	if (!chan) {
		chan = dev_priv->channel;
		mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
	}

	/* create temporary vmas for the transfer and attach them to the
	 * old nouveau_mem node, these will get cleaned up after ttm has
	 * destroyed the ttm_mem_reg
	 */
	if (dev_priv->card_type >= NV_50) {
		struct nouveau_mem *node = old_mem->mm_node;

		ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]);
		if (ret)
			goto out;

		ret = nouveau_vma_getmap(chan, nvbo, new_mem, &node->vma[1]);
		if (ret)
			goto out;
	}

	if (dev_priv->card_type < NV_50)
		ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	else
	if (dev_priv->card_type < NV_C0)
		ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	else
		ret = nvc0_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	if (ret == 0) {
		ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
						    no_wait_reserve,
						    no_wait_gpu, new_mem);
	}

out:
	if (chan == dev_priv->channel)
		mutex_unlock(&chan->mutex);
	return ret;
}

static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr,
			       no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve,
				   no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr,
			       no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu,
			      &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve,
				   no_wait_gpu, new_mem);
	if (ret)
		goto out;

out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = new_mem->mm_node;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_vma *vma;

	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (new_mem && new_mem->mem_type == TTM_PL_VRAM) {
			nouveau_vm_map(vma, new_mem->mm_node);
		} else
		if (new_mem && new_mem->mem_type == TTM_PL_TT &&
		    nvbo->page_shift == vma->vm->spg_shift) {
			nouveau_vm_map_sg(vma, 0,
					  new_mem->num_pages << PAGE_SHIFT,
					  node, node->pages);
		} else {
			nouveau_vm_unmap(vma);
		}
	}
}

static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
		   struct nouveau_tile_reg **new_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 offset = new_mem->start << PAGE_SHIFT;

	*new_tile = NULL;
	if (new_mem->mem_type != TTM_PL_VRAM)
		return 0;

	if (dev_priv->card_type >= NV_10) {
		*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
						nvbo->tile_mode,
						nvbo->tile_flags);
	}

	return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_tile_reg *new_tile,
		      struct nouveau_tile_reg **old_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;

	nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
	*old_tile = new_tile;
}

static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait_reserve, bool no_wait_gpu,
		struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_tile_reg *new_tile = NULL;
	int ret = 0;

	if (dev_priv->card_type < NV_50) {
		ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
		if (ret)
			return ret;
	}

	/* Fake bo copy. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_mem;
		new_mem->mm_node = NULL;
		goto out;
	}

	/* Software copy if the card isn't up and running yet. */
	if (!dev_priv->channel) {
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve,
					 no_wait_gpu, new_mem);
		goto out;
	}

	/* Hardware assisted copy. */
	if (new_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve,
					    no_wait_gpu, new_mem);
	else if (old_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve,
					    no_wait_gpu, new_mem);
	else
		ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve,
					   no_wait_gpu, new_mem);

	if (!ret)
		goto out;

	/* Fallback to software copy. */
	ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu,
				 new_mem);

out:
	if (dev_priv->card_type < NV_50) {
		if (ret)
			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
		else
			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
	}

	return ret;
}

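/*
 * Summary of the ladder above (descriptive, derived from the code):
 * moves out of unpopulated system memory are satisfied by swapping
 * ttm_mem_reg structs; before the kernel channel exists every move is
 * a CPU memcpy; otherwise a GPU copy is attempted, bouncing through a
 * GART placement when one side is system memory, with
 * ttm_bo_move_memcpy() as the final fallback.
 */
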
static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}

static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;
	int ret;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		return 0;
	case TTM_PL_TT:
#if __OS_HAS_AGP
		if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = dev_priv->gart_info.aper_base;
			mem->bus.is_iomem = true;
		}
#endif
		break;
	case TTM_PL_VRAM:
	{
		struct nouveau_mem *node = mem->mm_node;
		u8 page_shift;

		if (!dev_priv->bar1_vm) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = pci_resource_start(dev->pdev, 1);
			mem->bus.is_iomem = true;
			break;
		}

		if (dev_priv->card_type >= NV_C0)
			page_shift = node->page_shift;
		else
			page_shift = 12;

		ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size,
				     page_shift, NV_MEM_ACCESS_RW,
				     &node->bar_vma);
		if (ret)
			return ret;

		/* nouveau_vm_map() returns void, there is no error to check */
		nouveau_vm_map(&node->bar_vma, node);

		mem->bus.offset = node->bar_vma.offset;
		if (dev_priv->card_type == NV_50) /*XXX*/
			mem->bus.offset -= 0x0020000000ULL;
		mem->bus.base = pci_resource_start(dev->pdev, 1);
		mem->bus.is_iomem = true;
	}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct nouveau_mem *node = mem->mm_node;

	if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM)
		return;

	if (!node->bar_vma.node)
		return;

	nouveau_vm_unmap(&node->bar_vma);
	nouveau_vm_put(&node->bar_vma);
}

static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->mem.mem_type != TTM_PL_VRAM) {
		if (dev_priv->card_type < NV_50 ||
		    !nouveau_bo_tile_layout(nvbo))
			return 0;
	}

	/* make sure bo is in mappable vram */
	if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages)
		return 0;

	nvbo->placement.fpfn = 0;
	nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
	nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0);
	return nouveau_bo_validate(nvbo, false, true, false);
}

void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
{
	struct nouveau_fence *old_fence;

	if (likely(fence))
		nouveau_fence_ref(fence);

	spin_lock(&nvbo->bo.bdev->fence_lock);
	old_fence = nvbo->bo.sync_obj;
	nvbo->bo.sync_obj = fence;
	spin_unlock(&nvbo->bo.bdev->fence_lock);

	nouveau_fence_unref(&old_fence);
}

static int
nouveau_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct drm_nouveau_private *dev_priv;
	struct drm_device *dev;
	unsigned i;
	int r;

	if (ttm->state != tt_unpopulated)
		return 0;

	dev_priv = nouveau_bdev(ttm->bdev);
	dev = dev_priv->dev;

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate((void *)ttm, dev->dev);
	}
#endif

	r = ttm_pool_populate(ttm);
	if (r) {
		return r;
	}

	for (i = 0; i < ttm->num_pages; i++) {
		ttm_dma->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
						       0, PAGE_SIZE,
						       PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(dev->pdev, ttm_dma->dma_address[i])) {
			/* unwind every mapping made so far, including page 0 */
			while (i--) {
				pci_unmap_page(dev->pdev,
					       ttm_dma->dma_address[i],
					       PAGE_SIZE,
					       PCI_DMA_BIDIRECTIONAL);
				ttm_dma->dma_address[i] = 0;
			}
			ttm_pool_unpopulate(ttm);
			return -EFAULT;
		}
	}
	return 0;
}

static void
nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct drm_nouveau_private *dev_priv;
	struct drm_device *dev;
	unsigned i;

	dev_priv = nouveau_bdev(ttm->bdev);
	dev = dev_priv->dev;

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate((void *)ttm, dev->dev);
		return;
	}
#endif

	for (i = 0; i < ttm->num_pages; i++) {
		if (ttm_dma->dma_address[i]) {
			pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		}
	}

	ttm_pool_unpopulate(ttm);
}

struct ttm_bo_driver nouveau_bo_driver = {
	.ttm_tt_create = &nouveau_ttm_tt_create,
	.ttm_tt_populate = &nouveau_ttm_tt_populate,
	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move_notify = nouveau_bo_move_ntfy,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.sync_obj_signaled = __nouveau_fence_signalled,
	.sync_obj_wait = __nouveau_fence_wait,
	.sync_obj_flush = __nouveau_fence_flush,
	.sync_obj_unref = __nouveau_fence_unref,
	.sync_obj_ref = __nouveau_fence_ref,
	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
};

struct nouveau_vma *
nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nouveau_vm *vm)
{
	struct nouveau_vma *vma;
	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (vma->vm == vm)
			return vma;
	}

	return NULL;
}

int
nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
		   struct nouveau_vma *vma)
{
	const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	struct nouveau_mem *node = nvbo->bo.mem.mm_node;
	int ret;

	ret = nouveau_vm_get(vm, size, nvbo->page_shift,
			     NV_MEM_ACCESS_RW, vma);
	if (ret)
		return ret;

	if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
		nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
	else
	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
		nouveau_vm_map_sg(vma, 0, size, node, node->pages);

	list_add_tail(&vma->head, &nvbo->vma_list);
	vma->refcount = 1;
	return 0;
}

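/*
 * Example (hypothetical sketch): mapping a bo into a channel's address
 * space and tearing the mapping down again.  The caller owns the vma
 * storage; the channel pointer "chan" is assumed to exist.
 *
 *	struct nouveau_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
 *
 *	ret = nouveau_bo_vma_add(nvbo, chan->vm, vma);
 *	if (ret == 0) {
 *		... use the mapping at vma->offset ...
 *		nouveau_bo_vma_del(nvbo, vma);
 *	}
 *	kfree(vma);
 */
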
void
nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	if (vma->node) {
		if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM) {
			spin_lock(&nvbo->bo.bdev->fence_lock);
			ttm_bo_wait(&nvbo->bo, false, false, false);
			spin_unlock(&nvbo->bo.bdev->fence_lock);
			nouveau_vm_unmap(vma);
		}

		nouveau_vm_put(vma);
		list_del(&vma->head);
	}
}