drm/ttm: split no_wait argument in 2 GPU or reserve wait
drivers/gpu/drm/radeon/radeon_ttm.c
/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include <linux/seq_file.h>
#include "radeon_reg.h"
#include "radeon.h"

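/*
 * TTM buffer objects are mapped at file offsets of 4GB and above;
 * radeon_mmap() routes anything below this offset to the legacy
 * drm_mmap() path.
 */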
#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)

static int radeon_ttm_debugfs_init(struct radeon_device *rdev);

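/* Map a ttm_bo_device back to the radeon_device that embeds it. */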
static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
{
	struct radeon_mman *mman;
	struct radeon_device *rdev;

	mman = container_of(bdev, struct radeon_mman, bdev);
	rdev = container_of(mman, struct radeon_device, mman);
	return rdev;
}


/*
 * Global memory.
 */
static int radeon_ttm_mem_global_init(struct ttm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void radeon_ttm_mem_global_release(struct ttm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

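/*
 * Take references on the two TTM globals (the memory accounting object
 * and the BO global state) that are shared by all TTM-based drivers.
 */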
static int radeon_ttm_global_init(struct radeon_device *rdev)
{
	struct ttm_global_reference *global_ref;
	int r;

	rdev->mman.mem_global_referenced = false;
	global_ref = &rdev->mman.mem_global_ref;
	global_ref->global_type = TTM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &radeon_ttm_mem_global_init;
	global_ref->release = &radeon_ttm_mem_global_release;
	r = ttm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM memory accounting "
			  "subsystem.\n");
		return r;
	}

	rdev->mman.bo_global_ref.mem_glob =
		rdev->mman.mem_global_ref.object;
	global_ref = &rdev->mman.bo_global_ref.ref;
	global_ref->global_type = TTM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;
	r = ttm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
		ttm_global_item_unref(&rdev->mman.mem_global_ref);
		return r;
	}

	rdev->mman.mem_global_referenced = true;
	return 0;
}

static void radeon_ttm_global_fini(struct radeon_device *rdev)
{
	if (rdev->mman.mem_global_referenced) {
		ttm_global_item_unref(&rdev->mman.bo_global_ref.ref);
		ttm_global_item_unref(&rdev->mman.mem_global_ref);
		rdev->mman.mem_global_referenced = false;
	}
}

struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev);

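/*
 * AGP boards use TTM's generic AGP backend; everything else binds
 * through the on-chip GART.
 */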
static struct ttm_backend*
radeon_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
	struct radeon_device *rdev;

	rdev = radeon_get_rdev(bdev);
#if __OS_HAS_AGP
	if (rdev->flags & RADEON_IS_AGP) {
		return ttm_agp_backend_init(bdev, rdev->ddev->agp->bridge);
	} else
#endif
	{
		return radeon_ttm_backend_create(rdev);
	}
}

static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}

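/*
 * Describe each memory domain (system, GTT, VRAM) to TTM: GPU offset,
 * aperture location/size and the caching attributes it supports.
 */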
static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				struct ttm_mem_type_manager *man)
{
	struct radeon_device *rdev;

	rdev = radeon_get_rdev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_TT:
		man->gpu_offset = rdev->mc.gtt_start;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
#if __OS_HAS_AGP
		if (rdev->flags & RADEON_IS_AGP) {
			if (!(drm_core_has_AGP(rdev->ddev) && rdev->ddev->agp)) {
				DRM_ERROR("AGP is not enabled for memory type %u\n",
					  (unsigned)type);
				return -EINVAL;
			}
			man->io_offset = rdev->mc.agp_base;
			man->io_size = rdev->mc.gtt_size;
			man->io_addr = NULL;
			if (!rdev->ddev->agp->cant_use_aperture)
				man->flags = TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
					     TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
		} else
#endif
		{
			man->io_offset = 0;
			man->io_size = 0;
			man->io_addr = NULL;
		}
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->gpu_offset = rdev->mc.vram_start;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		man->io_addr = NULL;
		man->io_offset = rdev->mc.aper_base;
		man->io_size = rdev->mc.aper_size;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

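/*
 * On eviction, route VRAM contents to GTT while the CP is running (so a
 * blit can move them); fall back to the CPU domain otherwise.
 */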
static void radeon_evict_flags(struct ttm_buffer_object *bo,
				struct ttm_placement *placement)
{
	struct radeon_bo *rbo;
	static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;

	if (!radeon_ttm_bo_is_radeon_bo(bo)) {
		placement->fpfn = 0;
		placement->lpfn = 0;
		placement->placement = &placements;
		placement->busy_placement = &placements;
		placement->num_placement = 1;
		placement->num_busy_placement = 1;
		return;
	}
	rbo = container_of(bo, struct radeon_bo, tbo);
	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		if (rbo->rdev->cp.ready == false)
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
		else
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
		break;
	case TTM_PL_TT:
	default:
		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
	}
	*placement = rbo->placement;
}

static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}

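/* "Move" that only updates TTM's bookkeeping; no data is copied. */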
static void radeon_move_null(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	BUG_ON(old_mem->mm_node != NULL);
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
}

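/*
 * Copy a BO with the CP's copy engine, then hand the resulting fence to
 * ttm_bo_move_accel_cleanup() to finish the move.
 */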
static int radeon_move_blit(struct ttm_buffer_object *bo,
			bool evict, int no_wait_reserve, bool no_wait_gpu,
			struct ttm_mem_reg *new_mem,
			struct ttm_mem_reg *old_mem)
{
	struct radeon_device *rdev;
	uint64_t old_start, new_start;
	struct radeon_fence *fence;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	r = radeon_fence_create(rdev, &fence);
	if (unlikely(r)) {
		return r;
	}
	old_start = old_mem->mm_node->start << PAGE_SHIFT;
	new_start = new_mem->mm_node->start << PAGE_SHIFT;

	switch (old_mem->mem_type) {
	case TTM_PL_VRAM:
		old_start += rdev->mc.vram_start;
		break;
	case TTM_PL_TT:
		old_start += rdev->mc.gtt_start;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
		return -EINVAL;
	}
	switch (new_mem->mem_type) {
	case TTM_PL_VRAM:
		new_start += rdev->mc.vram_start;
		break;
	case TTM_PL_TT:
		new_start += rdev->mc.gtt_start;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", new_mem->mem_type);
		return -EINVAL;
	}
	if (!rdev->cp.ready) {
		DRM_ERROR("Trying to move memory with CP turned off.\n");
		return -EINVAL;
	}
	r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence);
	/* FIXME: handle copy error */
	r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
				      evict, no_wait_reserve, no_wait_gpu, new_mem);
	radeon_fence_unref(&fence);
	return r;
}

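/*
 * VRAM -> system: bounce through a temporary GTT allocation so the copy
 * engine can reach the pages, then finish with a TTM move.
 */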
static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible,
				bool no_wait_reserve, bool no_wait_gpu,
				struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	u32 placements;
	struct ttm_placement placement;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.fpfn = 0;
	placement.lpfn = 0;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
			     interruptible, no_wait_reserve, no_wait_gpu);
	if (unlikely(r)) {
		return r;
	}

	r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
	if (unlikely(r)) {
		goto out_cleanup;
	}

	r = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
out_cleanup:
	if (tmp_mem.mm_node) {
		struct ttm_bo_global *glob = rdev->mman.bdev.glob;

		spin_lock(&glob->lru_lock);
		drm_mm_put_block(tmp_mem.mm_node);
		spin_unlock(&glob->lru_lock);
		return r;
	}
	return r;
}

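/*
 * System -> VRAM: the inverse bounce; stage the pages in GTT first, then
 * blit them into VRAM.
 */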
static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible,
				bool no_wait_reserve, bool no_wait_gpu,
				struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_placement placement;
	u32 placements;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.fpfn = 0;
	placement.lpfn = 0;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_reserve, no_wait_gpu);
	if (unlikely(r)) {
		return r;
	}
	r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
out_cleanup:
	if (tmp_mem.mm_node) {
		struct ttm_bo_global *glob = rdev->mman.bdev.glob;

		spin_lock(&glob->lru_lock);
		drm_mm_put_block(tmp_mem.mm_node);
		spin_unlock(&glob->lru_lock);
		return r;
	}
	return r;
}

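/*
 * Top-level move hook: pick a null move, a GPU blit (possibly bounced
 * through GTT) or a CPU memcpy, depending on the source and destination
 * domains and whether the CP is available.
 */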
static int radeon_bo_move(struct ttm_buffer_object *bo,
			bool evict, bool interruptible,
			bool no_wait_reserve, bool no_wait_gpu,
			struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		radeon_move_null(bo, new_mem);
		return 0;
	}
	if ((old_mem->mem_type == TTM_PL_TT &&
	     new_mem->mem_type == TTM_PL_SYSTEM) ||
	    (old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_TT)) {
		/* bind is enough */
		radeon_move_null(bo, new_mem);
		return 0;
	}
	if (!rdev->cp.ready || rdev->asic->copy == NULL) {
		/* use memcpy */
		goto memcpy;
	}

	if (old_mem->mem_type == TTM_PL_VRAM &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		r = radeon_move_vram_ram(bo, evict, interruptible,
					no_wait_reserve, no_wait_gpu, new_mem);
	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
		   new_mem->mem_type == TTM_PL_VRAM) {
		r = radeon_move_ram_vram(bo, evict, interruptible,
					no_wait_reserve, no_wait_gpu, new_mem);
	} else {
		r = radeon_move_blit(bo, evict, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
	}

	if (r) {
memcpy:
		r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
	}

	return r;
}

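/*
 * TTM sync objects are radeon fences here; these thin wrappers adapt
 * the fence API to the hooks TTM expects.
 */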
static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg,
				bool lazy, bool interruptible)
{
	return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible);
}

static int radeon_sync_obj_flush(void *sync_obj, void *sync_arg)
{
	return 0;
}

static void radeon_sync_obj_unref(void **sync_obj)
{
	radeon_fence_unref((struct radeon_fence **)sync_obj);
}

static void *radeon_sync_obj_ref(void *sync_obj)
{
	return radeon_fence_ref((struct radeon_fence *)sync_obj);
}

static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg)
{
	return radeon_fence_signaled((struct radeon_fence *)sync_obj);
}

static struct ttm_bo_driver radeon_bo_driver = {
	.create_ttm_backend_entry = &radeon_create_ttm_backend_entry,
	.invalidate_caches = &radeon_invalidate_caches,
	.init_mem_type = &radeon_init_mem_type,
	.evict_flags = &radeon_evict_flags,
	.move = &radeon_bo_move,
	.verify_access = &radeon_verify_access,
	.sync_obj_signaled = &radeon_sync_obj_signaled,
	.sync_obj_wait = &radeon_sync_obj_wait,
	.sync_obj_flush = &radeon_sync_obj_flush,
	.sync_obj_unref = &radeon_sync_obj_unref,
	.sync_obj_ref = &radeon_sync_obj_ref,
	.move_notify = &radeon_bo_move_notify,
	.fault_reserve_notify = &radeon_bo_fault_reserve_notify,
};

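/*
 * Bring up TTM: the globals, the BO device, the VRAM and GTT heaps, the
 * pinned stolen-VGA buffer and the debugfs files.
 */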
int radeon_ttm_init(struct radeon_device *rdev)
{
	int r;

	r = radeon_ttm_global_init(rdev);
	if (r) {
		return r;
	}
	/* No other user of the address space, so set it to 0 */
	r = ttm_bo_device_init(&rdev->mman.bdev,
			       rdev->mman.bo_global_ref.ref.object,
			       &radeon_bo_driver, DRM_FILE_PAGE_OFFSET,
			       rdev->need_dma32);
	if (r) {
		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
		return r;
	}
	rdev->mman.initialized = true;
	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
			   rdev->mc.real_vram_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing VRAM heap.\n");
		return r;
	}
	r = radeon_bo_create(rdev, NULL, 256 * 1024, true,
			     RADEON_GEM_DOMAIN_VRAM,
			     &rdev->stollen_vga_memory);
	if (r) {
		return r;
	}
	r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
	if (r)
		return r;
	r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
	radeon_bo_unreserve(rdev->stollen_vga_memory);
	if (r) {
		radeon_bo_unref(&rdev->stollen_vga_memory);
		return r;
	}
	DRM_INFO("radeon: %uM of VRAM memory ready\n",
		 (unsigned)rdev->mc.real_vram_size / (1024 * 1024));
	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
			   rdev->mc.gtt_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing GTT heap.\n");
		return r;
	}
	DRM_INFO("radeon: %uM of GTT memory ready.\n",
		 (unsigned)(rdev->mc.gtt_size / (1024 * 1024)));
	if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
		rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
	}

	r = radeon_ttm_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("Failed to init debugfs\n");
		return r;
	}
	return 0;
}

void radeon_ttm_fini(struct radeon_device *rdev)
{
	int r;

	if (!rdev->mman.initialized)
		return;
	if (rdev->stollen_vga_memory) {
		r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
		if (r == 0) {
			radeon_bo_unpin(rdev->stollen_vga_memory);
			radeon_bo_unreserve(rdev->stollen_vga_memory);
		}
		radeon_bo_unref(&rdev->stollen_vga_memory);
	}
	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
	ttm_bo_device_release(&rdev->mman.bdev);
	radeon_gart_fini(rdev);
	radeon_ttm_global_fini(rdev);
	rdev->mman.initialized = false;
	DRM_INFO("radeon: ttm finalized\n");
}

static struct vm_operations_struct radeon_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops = NULL;

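/* Wrap TTM's fault handler so a NULL vm_private_data is never dereferenced. */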
static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo;
	int r;

	bo = (struct ttm_buffer_object *)vma->vm_private_data;
	if (bo == NULL) {
		return VM_FAULT_NOPAGE;
	}
	r = ttm_vm_ops->fault(vma, vmf);
	return r;
}

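/*
 * mmap entry point: offsets below DRM_FILE_PAGE_OFFSET go to the legacy
 * drm_mmap() path; BO offsets are handed to ttm_bo_mmap() and the
 * resulting vm_ops are wrapped with radeon_ttm_fault().
 */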
int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct radeon_device *rdev;
	int r;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
		return drm_mmap(filp, vma);
	}

	file_priv = (struct drm_file *)filp->private_data;
	rdev = file_priv->minor->dev->dev_private;
	if (rdev == NULL) {
		return -EINVAL;
	}
	r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
	if (unlikely(r != 0)) {
		return r;
	}
	if (unlikely(ttm_vm_ops == NULL)) {
		ttm_vm_ops = vma->vm_ops;
		radeon_ttm_vm_ops = *ttm_vm_ops;
		radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
	}
	vma->vm_ops = &radeon_ttm_vm_ops;
	return 0;
}


/*
 * TTM backend functions.
 */
struct radeon_ttm_backend {
	struct ttm_backend backend;
	struct radeon_device *rdev;
	unsigned long num_pages;
	struct page **pages;
	struct page *dummy_read_page;
	bool populated;
	bool bound;
	unsigned offset;
};

static int radeon_ttm_backend_populate(struct ttm_backend *backend,
				       unsigned long num_pages,
				       struct page **pages,
				       struct page *dummy_read_page)
{
	struct radeon_ttm_backend *gtt;

	gtt = container_of(backend, struct radeon_ttm_backend, backend);
	gtt->pages = pages;
	gtt->num_pages = num_pages;
	gtt->dummy_read_page = dummy_read_page;
	gtt->populated = true;
	return 0;
}

static void radeon_ttm_backend_clear(struct ttm_backend *backend)
{
	struct radeon_ttm_backend *gtt;

	gtt = container_of(backend, struct radeon_ttm_backend, backend);
	gtt->pages = NULL;
	gtt->num_pages = 0;
	gtt->dummy_read_page = NULL;
	gtt->populated = false;
	gtt->bound = false;
}

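/* Bind the populated pages into the GART at the BO's GPU offset. */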
static int radeon_ttm_backend_bind(struct ttm_backend *backend,
				   struct ttm_mem_reg *bo_mem)
{
	struct radeon_ttm_backend *gtt;
	int r;

	gtt = container_of(backend, struct radeon_ttm_backend, backend);
	gtt->offset = bo_mem->mm_node->start << PAGE_SHIFT;
	if (!gtt->num_pages) {
		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", gtt->num_pages, bo_mem, backend);
	}
	r = radeon_gart_bind(gtt->rdev, gtt->offset,
			     gtt->num_pages, gtt->pages);
	if (r) {
		DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
			  gtt->num_pages, gtt->offset);
		return r;
	}
	gtt->bound = true;
	return 0;
}

static int radeon_ttm_backend_unbind(struct ttm_backend *backend)
{
	struct radeon_ttm_backend *gtt;

	gtt = container_of(backend, struct radeon_ttm_backend, backend);
	radeon_gart_unbind(gtt->rdev, gtt->offset, gtt->num_pages);
	gtt->bound = false;
	return 0;
}

static void radeon_ttm_backend_destroy(struct ttm_backend *backend)
{
	struct radeon_ttm_backend *gtt;

	gtt = container_of(backend, struct radeon_ttm_backend, backend);
	if (gtt->bound) {
		radeon_ttm_backend_unbind(backend);
	}
	kfree(gtt);
}

static struct ttm_backend_func radeon_backend_func = {
	.populate = &radeon_ttm_backend_populate,
	.clear = &radeon_ttm_backend_clear,
	.bind = &radeon_ttm_backend_bind,
	.unbind = &radeon_ttm_backend_unbind,
	.destroy = &radeon_ttm_backend_destroy,
};

struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev)
{
	struct radeon_ttm_backend *gtt;

	gtt = kzalloc(sizeof(struct radeon_ttm_backend), GFP_KERNEL);
	if (gtt == NULL) {
		return NULL;
	}
	gtt->backend.bdev = &rdev->mman.bdev;
	gtt->backend.flags = 0;
	gtt->backend.func = &radeon_backend_func;
	gtt->rdev = rdev;
	gtt->pages = NULL;
	gtt->num_pages = 0;
	gtt->dummy_read_page = NULL;
	gtt->populated = false;
	gtt->bound = false;
	return &gtt->backend;
}


#define RADEON_DEBUGFS_MEM_TYPES 2

#if defined(CONFIG_DEBUG_FS)
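/* Dump a drm_mm allocator's state under the global LRU lock. */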
static int radeon_mm_dump_table(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_mm *mm = (struct drm_mm *)node->info_ent->data;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int ret;
	struct ttm_bo_global *glob = rdev->mman.bdev.glob;

	spin_lock(&glob->lru_lock);
	ret = drm_mm_dump_table(m, mm);
	spin_unlock(&glob->lru_lock);
	return ret;
}
#endif

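/*
 * Register one debugfs file per managed heap: radeon_vram_mm and
 * radeon_gtt_mm.
 */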
static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES];
	static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES][32];
	unsigned i;

	for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
		if (i == 0)
			sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
		else
			sprintf(radeon_mem_types_names[i], "radeon_gtt_mm");
		radeon_mem_types_list[i].name = radeon_mem_types_names[i];
		radeon_mem_types_list[i].show = &radeon_mm_dump_table;
		radeon_mem_types_list[i].driver_features = 0;
		if (i == 0)
			radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_VRAM].manager;
		else
			radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].manager;

	}
	return radeon_debugfs_add_files(rdev, radeon_mem_types_list, RADEON_DEBUGFS_MEM_TYPES);

#endif
	return 0;
}