/*
 * Copyright (c) 2007-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA,
 * All Rights Reserved.
 * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA,
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include <subdev/fb.h>
#include <subdev/vm.h>
#include <subdev/instmem.h>

#include "nouveau_drm.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"

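/*
 * TTM memory-type manager for VRAM.  These are thin wrappers around the
 * fb subdev, which owns the actual VRAM allocator.
 */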
static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
        struct nouveau_drm *drm = nouveau_bdev(man->bdev);
        struct nouveau_fb *pfb = nouveau_fb(drm->device);
        man->priv = pfb;
        return 0;
}

static int
nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
{
        man->priv = NULL;
        return 0;
}

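/*
 * Release any GPU virtual address mappings still attached to a memory
 * node before the backing storage is returned to its allocator.
 */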
static inline void
nouveau_mem_node_cleanup(struct nouveau_mem *node)
{
        if (node->vma[0].node) {
                nouveau_vm_unmap(&node->vma[0]);
                nouveau_vm_put(&node->vma[0]);
        }

        if (node->vma[1].node) {
                nouveau_vm_unmap(&node->vma[1]);
                nouveau_vm_put(&node->vma[1]);
        }
}

static void
nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
                         struct ttm_mem_reg *mem)
{
        struct nouveau_drm *drm = nouveau_bdev(man->bdev);
        struct nouveau_fb *pfb = nouveau_fb(drm->device);
        nouveau_mem_node_cleanup(mem->mm_node);
        pfb->ram.put(pfb, (struct nouveau_mem **)&mem->mm_node);
}

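/*
 * Allocate VRAM for a buffer object.  Note that -ENOSPC is reported to
 * TTM as success with a NULL mm_node, which tells TTM to evict something
 * and retry rather than fail the allocation outright.
 */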
static int
nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
                         struct ttm_buffer_object *bo,
                         struct ttm_placement *placement,
                         struct ttm_mem_reg *mem)
{
        struct nouveau_drm *drm = nouveau_bdev(man->bdev);
        struct nouveau_fb *pfb = nouveau_fb(drm->device);
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nouveau_mem *node;
        u32 size_nc = 0;
        int ret;

        if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
                size_nc = 1 << nvbo->page_shift;

        ret = pfb->ram.get(pfb, mem->num_pages << PAGE_SHIFT,
                           mem->page_alignment << PAGE_SHIFT, size_nc,
                           (nvbo->tile_flags >> 8) & 0x3ff, &node);
        if (ret) {
                mem->mm_node = NULL;
                return (ret == -ENOSPC) ? 0 : ret;
        }

        node->page_shift = nvbo->page_shift;

        mem->mm_node = node;
        mem->start = node->offset >> PAGE_SHIFT;
        return 0;
}

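/* Dump every node in the fb subdev's VRAM mm, followed by totals. */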
static void
nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
        struct nouveau_fb *pfb = man->priv;
        struct nouveau_mm *mm = &pfb->vram;
        struct nouveau_mm_node *r;
        u32 total = 0, free = 0;

        mutex_lock(&mm->mutex);
        list_for_each_entry(r, &mm->nodes, nl_entry) {
                printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
                       prefix, r->type, ((u64)r->offset << 12),
                       (((u64)r->offset + r->length) << 12));

                total += r->length;
                if (!r->type)
                        free += r->length;
        }
        mutex_unlock(&mm->mutex);

        printk(KERN_DEBUG "%s total: 0x%010llx free: 0x%010llx\n",
               prefix, (u64)total << 12, (u64)free << 12);
        printk(KERN_DEBUG "%s block: 0x%08x\n",
               prefix, mm->block_size << 12);
}

const struct ttm_mem_type_manager_func nouveau_vram_manager = {
        .init = nouveau_vram_manager_init,
        .takedown = nouveau_vram_manager_fini,
        .get_node = nouveau_vram_manager_new,
        .put_node = nouveau_vram_manager_del,
        .debug = nouveau_vram_manager_debug
};

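/*
 * TTM memory-type manager for the GART aperture on chipsets with a
 * paged MMU (nv50 and later).  No address space is consumed up front;
 * the node is only tagged with the storage-type bits needed at bind time.
 */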
static int
nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
        return 0;
}

static int
nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
{
        return 0;
}

static void
nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
                         struct ttm_mem_reg *mem)
{
        nouveau_mem_node_cleanup(mem->mm_node);
        kfree(mem->mm_node);
        mem->mm_node = NULL;
}

static int
nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
                         struct ttm_buffer_object *bo,
                         struct ttm_placement *placement,
                         struct ttm_mem_reg *mem)
{
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nouveau_mem *node;

        if (unlikely((mem->num_pages << PAGE_SHIFT) >= 512 * 1024 * 1024))
                return -ENOMEM;

        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (!node)
                return -ENOMEM;
        node->page_shift = 12;

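        /*
         * Record the storage-type bits from the object's tile flags so
         * the VM code can apply them when the node is mapped.
         */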
        switch (nv_device(drm->device)->card_type) {
        case NV_50:
                if (nv_device(drm->device)->chipset != 0x50)
                        node->memtype = (nvbo->tile_flags & 0x7f00) >> 8;
                break;
        case NV_C0:
        case NV_D0:
        case NV_E0:
                node->memtype = (nvbo->tile_flags & 0xff00) >> 8;
                break;
        default:
                break;
        }

        mem->mm_node = node;
        mem->start = 0;
        return 0;
}

static void
nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nouveau_gart_manager = {
        .init = nouveau_gart_manager_init,
        .takedown = nouveau_gart_manager_fini,
        .get_node = nouveau_gart_manager_new,
        .put_node = nouveau_gart_manager_del,
        .debug = nouveau_gart_manager_debug
};

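/*
 * TTM memory-type manager for the GART aperture on pre-nv50 chipsets:
 * addresses are allocated up front from the single VM exposed by the
 * vmmgr subdev.
 */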
#include <core/subdev/vm/nv04.h>
static int
nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
        struct nouveau_drm *drm = nouveau_bdev(man->bdev);
        struct nouveau_vmmgr *vmm = nouveau_vmmgr(drm->device);
        struct nv04_vmmgr_priv *priv = (void *)vmm;
        struct nouveau_vm *vm = NULL;
        nouveau_vm_ref(priv->vm, &vm, NULL);
        man->priv = vm;
        return 0;
}

static int
nv04_gart_manager_fini(struct ttm_mem_type_manager *man)
{
        struct nouveau_vm *vm = man->priv;
        nouveau_vm_ref(NULL, &vm, NULL);
        man->priv = NULL;
        return 0;
}

static void
nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem)
{
        struct nouveau_mem *node = mem->mm_node;
        if (node->vma[0].node)
                nouveau_vm_put(&node->vma[0]);
        kfree(mem->mm_node);
        mem->mm_node = NULL;
}

static int
nv04_gart_manager_new(struct ttm_mem_type_manager *man,
                      struct ttm_buffer_object *bo,
                      struct ttm_placement *placement,
                      struct ttm_mem_reg *mem)
{
        struct nouveau_mem *node;
        int ret;

        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (!node)
                return -ENOMEM;

        node->page_shift = 12;

        ret = nouveau_vm_get(man->priv, mem->num_pages << 12, node->page_shift,
                             NV_MEM_ACCESS_RW, &node->vma[0]);
        if (ret) {
                kfree(node);
                return ret;
        }

        mem->mm_node = node;
        mem->start = node->vma[0].offset >> PAGE_SHIFT;
        return 0;
}

static void
nv04_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nv04_gart_manager = {
        .init = nv04_gart_manager_init,
        .takedown = nv04_gart_manager_fini,
        .get_node = nv04_gart_manager_new,
        .put_node = nv04_gart_manager_del,
        .debug = nv04_gart_manager_debug
};

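/*
 * mmap() entry point: offsets below DRM_FILE_PAGE_OFFSET are legacy DRM
 * maps, everything above is backed by a TTM buffer object.
 */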
int
nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *file_priv = filp->private_data;
        struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);

        if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
                return drm_mmap(filp, vma);

        return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
}

static int
nouveau_ttm_mem_global_init(struct drm_global_reference *ref)
{
        return ttm_mem_global_init(ref->object);
}

static void
nouveau_ttm_mem_global_release(struct drm_global_reference *ref)
{
        ttm_mem_global_release(ref->object);
}

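/*
 * TTM's memory accounting and BO state are system-wide singletons kept
 * in refcounted DRM globals; take (and later drop) a reference on both.
 */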
int
nouveau_ttm_global_init(struct nouveau_drm *drm)
{
        struct drm_global_reference *global_ref;
        int ret;

        global_ref = &drm->ttm.mem_global_ref;
        global_ref->global_type = DRM_GLOBAL_TTM_MEM;
        global_ref->size = sizeof(struct ttm_mem_global);
        global_ref->init = &nouveau_ttm_mem_global_init;
        global_ref->release = &nouveau_ttm_mem_global_release;

        ret = drm_global_item_ref(global_ref);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed setting up TTM memory accounting\n");
                drm->ttm.mem_global_ref.release = NULL;
                return ret;
        }

        drm->ttm.bo_global_ref.mem_glob = global_ref->object;
        global_ref = &drm->ttm.bo_global_ref.ref;
        global_ref->global_type = DRM_GLOBAL_TTM_BO;
        global_ref->size = sizeof(struct ttm_bo_global);
        global_ref->init = &ttm_bo_global_init;
        global_ref->release = &ttm_bo_global_release;

        ret = drm_global_item_ref(global_ref);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed setting up TTM BO subsystem\n");
                drm_global_item_unref(&drm->ttm.mem_global_ref);
                drm->ttm.mem_global_ref.release = NULL;
                return ret;
        }

        return 0;
}

void
nouveau_ttm_global_release(struct nouveau_drm *drm)
{
        if (drm->ttm.mem_global_ref.release == NULL)
                return;

        drm_global_item_unref(&drm->ttm.bo_global_ref.ref);
        drm_global_item_unref(&drm->ttm.mem_global_ref);
        drm->ttm.mem_global_ref.release = NULL;
}

int
nouveau_ttm_init(struct nouveau_drm *drm)
{
        struct drm_device *dev = drm->dev;
        u32 bits;
        int ret;

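        /*
         * Use the VM subdev's DMA address width unless AGP is enabled or
         * the PCI layer cannot address that much, then fall back to 32 bits.
         */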
        bits = nouveau_vmmgr(drm->device)->dma_bits;
        if (drm->agp.stat == ENABLED ||
            !pci_dma_supported(dev->pdev, DMA_BIT_MASK(bits)))
                bits = 32;

        ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(bits));
        if (ret)
                return ret;

        ret = pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(bits));
        if (ret)
                pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(32));

        ret = nouveau_ttm_global_init(drm);
        if (ret)
                return ret;

        ret = ttm_bo_device_init(&drm->ttm.bdev,
                                 drm->ttm.bo_global_ref.ref.object,
                                 &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET,
                                 bits <= 32 ? true : false);
        if (ret) {
                NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
                return ret;
        }

        /* VRAM init */
        drm->gem.vram_available  = nouveau_fb(drm->device)->ram.size;
        drm->gem.vram_available -= nouveau_instmem(drm->device)->reserved;

        ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_VRAM,
                             drm->gem.vram_available >> PAGE_SHIFT);
        if (ret) {
                NV_ERROR(drm, "VRAM mm init failed, %d\n", ret);
                return ret;
        }

        drm->ttm.mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
                                     pci_resource_len(dev->pdev, 1),
                                     DRM_MTRR_WC);

        /* GART init */
        if (drm->agp.stat != ENABLED) {
                drm->gem.gart_available = nouveau_vmmgr(drm->device)->limit;
                if (drm->gem.gart_available > 512 * 1024 * 1024)
                        drm->gem.gart_available = 512 * 1024 * 1024;
        } else {
                drm->gem.gart_available = drm->agp.size;
        }

        ret = ttm_bo_init_mm(&drm->ttm.bdev, TTM_PL_TT,
                             drm->gem.gart_available >> PAGE_SHIFT);
        if (ret) {
                NV_ERROR(drm, "GART mm init failed, %d\n", ret);
                return ret;
        }

        NV_INFO(drm, "VRAM: %d MiB\n", (u32)(drm->gem.vram_available >> 20));
        NV_INFO(drm, "GART: %d MiB\n", (u32)(drm->gem.gart_available >> 20));
        return 0;
}

void
nouveau_ttm_fini(struct nouveau_drm *drm)
{
        mutex_lock(&drm->dev->struct_mutex);
        ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
        ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);
        mutex_unlock(&drm->dev->struct_mutex);

        ttm_bo_device_release(&drm->ttm.bdev);

        nouveau_ttm_global_release(drm);

        if (drm->ttm.mtrr >= 0) {
                drm_mtrr_del(drm->ttm.mtrr,
                             pci_resource_start(drm->dev->pdev, 1),
                             pci_resource_len(drm->dev->pdev, 1), DRM_MTRR_WC);
                drm->ttm.mtrr = -1;
        }
}