/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <ttm/ttm_page_alloc.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "radeon_reg.h"
#include "radeon.h"

#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)

static int radeon_ttm_debugfs_init(struct radeon_device *rdev);

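/*
 * Recover the radeon_device that owns a ttm_bo_device: the bdev is
 * embedded in radeon_mman, which is itself embedded in radeon_device,
 * so two container_of() steps walk back to the owner.
 */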
static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
{
	struct radeon_mman *mman;
	struct radeon_device *rdev;

	mman = container_of(bdev, struct radeon_mman, bdev);
	rdev = container_of(mman, struct radeon_device, mman);
	return rdev;
}

/*
 * Global memory.
 */
static int radeon_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void radeon_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

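/*
 * Take references on the global TTM memory accounting and BO objects.
 * Both are shared across drivers, so they are acquired through
 * drm_global_item_ref() and dropped again in radeon_ttm_global_fini().
 */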
static int radeon_ttm_global_init(struct radeon_device *rdev)
{
	struct drm_global_reference *global_ref;
	int r;

	rdev->mman.mem_global_referenced = false;
	global_ref = &rdev->mman.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &radeon_ttm_mem_global_init;
	global_ref->release = &radeon_ttm_mem_global_release;
	r = drm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM memory accounting "
			  "subsystem.\n");
		return r;
	}

	rdev->mman.bo_global_ref.mem_glob =
		rdev->mman.mem_global_ref.object;
	global_ref = &rdev->mman.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;
	r = drm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
		drm_global_item_unref(&rdev->mman.mem_global_ref);
		return r;
	}

	rdev->mman.mem_global_referenced = true;
	return 0;
}

static void radeon_ttm_global_fini(struct radeon_device *rdev)
{
	if (rdev->mman.mem_global_referenced) {
		drm_global_item_unref(&rdev->mman.bo_global_ref.ref);
		drm_global_item_unref(&rdev->mman.mem_global_ref);
		rdev->mman.mem_global_referenced = false;
	}
}

static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}

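/*
 * Describe each memory pool to TTM: system RAM, GTT (GART, or the AGP
 * aperture when AGP is active) and on-board VRAM, along with the caching
 * attributes and GPU offset each of them supports.
 */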
static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				struct ttm_mem_type_manager *man)
{
	struct radeon_device *rdev;

	rdev = radeon_get_rdev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_TT:
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = rdev->mc.gtt_start;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
#if __OS_HAS_AGP
		if (rdev->flags & RADEON_IS_AGP) {
			if (!(drm_core_has_AGP(rdev->ddev) && rdev->ddev->agp)) {
				DRM_ERROR("AGP is not enabled for memory type %u\n",
					  (unsigned)type);
				return -EINVAL;
			}
			if (!rdev->ddev->agp->cant_use_aperture)
				man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
		}
#endif
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = rdev->mc.vram_start;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

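/*
 * Pick where a buffer should go when it is evicted: VRAM buffers are
 * pushed to GTT (or straight to the CPU domain while the GFX ring is
 * not ready), everything else falls back to the CPU domain.
 */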
static void radeon_evict_flags(struct ttm_buffer_object *bo,
				struct ttm_placement *placement)
{
	struct radeon_bo *rbo;
	static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;

	if (!radeon_ttm_bo_is_radeon_bo(bo)) {
		placement->fpfn = 0;
		placement->lpfn = 0;
		placement->placement = &placements;
		placement->busy_placement = &placements;
		placement->num_placement = 1;
		placement->num_busy_placement = 1;
		return;
	}
	rbo = container_of(bo, struct radeon_bo, tbo);
	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		if (rbo->rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready == false)
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
		else
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
		break;
	case TTM_PL_TT:
	default:
		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
	}
	*placement = rbo->placement;
}

static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}

static void radeon_move_null(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	BUG_ON(old_mem->mm_node != NULL);
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
}

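/*
 * Move a buffer with the GPU copy engine.  The start addresses are
 * translated into GPU offsets for the source and destination domains,
 * the copy is queued on the copy ring, and the resulting fence is handed
 * to ttm_bo_move_accel_cleanup() so the old placement is released once
 * the copy has finished.
 */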
static int radeon_move_blit(struct ttm_buffer_object *bo,
			bool evict, bool no_wait_gpu,
			struct ttm_mem_reg *new_mem,
			struct ttm_mem_reg *old_mem)
{
	struct radeon_device *rdev;
	uint64_t old_start, new_start;
	struct radeon_fence *fence;
	int r, ridx;

	rdev = radeon_get_rdev(bo->bdev);
	ridx = radeon_copy_ring_index(rdev);
	old_start = old_mem->start << PAGE_SHIFT;
	new_start = new_mem->start << PAGE_SHIFT;

	switch (old_mem->mem_type) {
	case TTM_PL_VRAM:
		old_start += rdev->mc.vram_start;
		break;
	case TTM_PL_TT:
		old_start += rdev->mc.gtt_start;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
		return -EINVAL;
	}
	switch (new_mem->mem_type) {
	case TTM_PL_VRAM:
		new_start += rdev->mc.vram_start;
		break;
	case TTM_PL_TT:
		new_start += rdev->mc.gtt_start;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", new_mem->mem_type);
		return -EINVAL;
	}
	if (!rdev->ring[ridx].ready) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);

	/* sync other rings */
	fence = bo->sync_obj;
	r = radeon_copy(rdev, old_start, new_start,
			new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
			&fence);
	/* FIXME: handle copy error */
	r = ttm_bo_move_accel_cleanup(bo, (void *)fence,
				      evict, no_wait_gpu, new_mem);
	radeon_fence_unref(&fence);
	return r;
}

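/*
 * VRAM -> system RAM: stage the buffer through a temporary GTT placement
 * so the GPU can blit it out of VRAM, then let TTM finish the move to
 * system memory.
 */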
static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible,
				bool no_wait_gpu,
				struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	u32 placements;
	struct ttm_placement placement;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.fpfn = 0;
	placement.lpfn = 0;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
			     interruptible, no_wait_gpu);
	if (unlikely(r)) {
		return r;
	}

	r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
	if (unlikely(r)) {
		goto out_cleanup;
	}

	r = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = radeon_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}

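/*
 * System RAM -> VRAM: the mirror of the path above; bind the pages into
 * a temporary GTT placement first, then blit them into VRAM.
 */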
static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible,
				bool no_wait_gpu,
				struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_placement placement;
	u32 placements;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.fpfn = 0;
	placement.lpfn = 0;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
			     interruptible, no_wait_gpu);
	if (unlikely(r)) {
		return r;
	}
	r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = radeon_move_blit(bo, true, no_wait_gpu, new_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}

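/*
 * Top-level move callback.  Moves between system RAM and GTT only need a
 * (re)bind, moves involving VRAM are staged through the blit helpers
 * above, and anything that cannot use the copy ring falls back to
 * ttm_bo_move_memcpy().
 */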
static int radeon_bo_move(struct ttm_buffer_object *bo,
			bool evict, bool interruptible,
			bool no_wait_gpu,
			struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		radeon_move_null(bo, new_mem);
		return 0;
	}
	if ((old_mem->mem_type == TTM_PL_TT &&
	     new_mem->mem_type == TTM_PL_SYSTEM) ||
	    (old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_TT)) {
		/* bind is enough */
		radeon_move_null(bo, new_mem);
		return 0;
	}
	if (!rdev->ring[radeon_copy_ring_index(rdev)].ready ||
	    rdev->asic->copy.copy == NULL) {
		/* use memcpy */
		goto memcpy;
	}

	if (old_mem->mem_type == TTM_PL_VRAM &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		r = radeon_move_vram_ram(bo, evict, interruptible,
					no_wait_gpu, new_mem);
	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
		   new_mem->mem_type == TTM_PL_VRAM) {
		r = radeon_move_ram_vram(bo, evict, interruptible,
					no_wait_gpu, new_mem);
	} else {
		r = radeon_move_blit(bo, evict, no_wait_gpu, new_mem, old_mem);
	}

	if (r) {
memcpy:
		r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
	}
	return r;
}

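/*
 * Fill in the bus address information TTM needs to map a buffer for CPU
 * access: nothing for system RAM, the AGP aperture for GTT when AGP is
 * active, and the PCI aperture (checked against the visible VRAM size)
 * for VRAM.
 */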
static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct radeon_device *rdev = radeon_get_rdev(bdev);

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_TT:
#if __OS_HAS_AGP
		if (rdev->flags & RADEON_IS_AGP) {
			/* RADEON_IS_AGP is set only if AGP is active */
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = rdev->mc.agp_base;
			mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture;
		}
#endif
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		/* check if it's visible */
		if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size)
			return -EINVAL;
		mem->bus.base = rdev->mc.aper_base;
		mem->bus.is_iomem = true;
#ifdef __alpha__
		/*
		 * Alpha: use bus.addr to hold the ioremap() return,
		 * so we can modify bus.base below.
		 */
		if (mem->placement & TTM_PL_FLAG_WC)
			mem->bus.addr =
				ioremap_wc(mem->bus.base + mem->bus.offset,
					   mem->bus.size);
		else
			mem->bus.addr =
				ioremap_nocache(mem->bus.base + mem->bus.offset,
						mem->bus.size);

		/*
		 * Alpha: Use just the bus offset plus
		 * the hose/domain memory base for bus.base.
		 * It then can be used to build PTEs for VRAM
		 * access, as done in ttm_bo_vm_fault().
		 */
		mem->bus.base = (mem->bus.base & 0x0ffffffffUL) +
			rdev->ddev->hose->dense_mem_base;
#endif
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static int radeon_sync_obj_wait(void *sync_obj, bool lazy, bool interruptible)
{
	return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible);
}

static int radeon_sync_obj_flush(void *sync_obj)
{
	return 0;
}

static void radeon_sync_obj_unref(void **sync_obj)
{
	radeon_fence_unref((struct radeon_fence **)sync_obj);
}

static void *radeon_sync_obj_ref(void *sync_obj)
{
	return radeon_fence_ref((struct radeon_fence *)sync_obj);
}

static bool radeon_sync_obj_signaled(void *sync_obj)
{
	return radeon_fence_signaled((struct radeon_fence *)sync_obj);
}

/*
 * TTM backend functions.
 */
struct radeon_ttm_tt {
	struct ttm_dma_tt		ttm;
	struct radeon_device		*rdev;
	u64				offset;
};

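/*
 * Bind the pages of a ttm_tt into the GART at the offset chosen by TTM,
 * using the DMA addresses set up at populate time.
 */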
static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
				   struct ttm_mem_reg *bo_mem)
{
	struct radeon_ttm_tt *gtt = (void *)ttm;
	int r;

	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
	if (!ttm->num_pages) {
		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
		     ttm->num_pages, bo_mem, ttm);
	}
	r = radeon_gart_bind(gtt->rdev, gtt->offset,
			     ttm->num_pages, ttm->pages, gtt->ttm.dma_address);
	if (r) {
		DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
			  ttm->num_pages, (unsigned)gtt->offset);
		return r;
	}
	return 0;
}

static int radeon_ttm_backend_unbind(struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = (void *)ttm;

	radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages);
	return 0;
}

static void radeon_ttm_backend_destroy(struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = (void *)ttm;

	ttm_dma_tt_fini(&gtt->ttm);
	kfree(gtt);
}

static struct ttm_backend_func radeon_backend_func = {
	.bind = &radeon_ttm_backend_bind,
	.unbind = &radeon_ttm_backend_unbind,
	.destroy = &radeon_ttm_backend_destroy,
};

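/*
 * Allocate the per-BO TTM backend.  AGP boards use the generic AGP
 * backend; everything else gets a radeon_ttm_tt whose pages are bound
 * through the GART.
 */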
static struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
					   unsigned long size, uint32_t page_flags,
					   struct page *dummy_read_page)
{
	struct radeon_device *rdev;
	struct radeon_ttm_tt *gtt;

	rdev = radeon_get_rdev(bdev);
#if __OS_HAS_AGP
	if (rdev->flags & RADEON_IS_AGP) {
		return ttm_agp_tt_create(bdev, rdev->ddev->agp->bridge,
					 size, page_flags, dummy_read_page);
	}
#endif

	gtt = kzalloc(sizeof(struct radeon_ttm_tt), GFP_KERNEL);
	if (gtt == NULL) {
		return NULL;
	}
	gtt->ttm.ttm.func = &radeon_backend_func;
	gtt->rdev = rdev;
	if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
		kfree(gtt);
		return NULL;
	}
	return &gtt->ttm.ttm;
}

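/*
 * Allocate and DMA-map the backing pages.  Shared (PRIME) buffers reuse
 * the importer's sg table, AGP and SWIOTLB configurations use their own
 * pools, and the default path maps each page with pci_map_page(),
 * unwinding the mappings if one of them fails.
 */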
static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct radeon_device *rdev;
	struct radeon_ttm_tt *gtt = (void *)ttm;
	unsigned i;
	int r;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (ttm->state != tt_unpopulated)
		return 0;

	if (slave && ttm->sg) {
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 gtt->ttm.dma_address, ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}

	rdev = radeon_get_rdev(ttm->bdev);
#if __OS_HAS_AGP
	if (rdev->flags & RADEON_IS_AGP) {
		return ttm_agp_tt_populate(ttm);
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate(&gtt->ttm, rdev->dev);
	}
#endif

	r = ttm_pool_populate(ttm);
	if (r) {
		return r;
	}

	for (i = 0; i < ttm->num_pages; i++) {
		gtt->ttm.dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i],
						       0, PAGE_SIZE,
						       PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
			while (--i) {
				pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
				gtt->ttm.dma_address[i] = 0;
			}
			ttm_pool_unpopulate(ttm);
			return -EFAULT;
		}
	}
	return 0;
}

static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct radeon_device *rdev;
	struct radeon_ttm_tt *gtt = (void *)ttm;
	unsigned i;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (slave)
		return;

	rdev = radeon_get_rdev(ttm->bdev);
#if __OS_HAS_AGP
	if (rdev->flags & RADEON_IS_AGP) {
		ttm_agp_tt_unpopulate(ttm);
		return;
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate(&gtt->ttm, rdev->dev);
		return;
	}
#endif

	for (i = 0; i < ttm->num_pages; i++) {
		if (gtt->ttm.dma_address[i]) {
			pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		}
	}

	ttm_pool_unpopulate(ttm);
}

static struct ttm_bo_driver radeon_bo_driver = {
	.ttm_tt_create = &radeon_ttm_tt_create,
	.ttm_tt_populate = &radeon_ttm_tt_populate,
	.ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
	.invalidate_caches = &radeon_invalidate_caches,
	.init_mem_type = &radeon_init_mem_type,
	.evict_flags = &radeon_evict_flags,
	.move = &radeon_bo_move,
	.verify_access = &radeon_verify_access,
	.sync_obj_signaled = &radeon_sync_obj_signaled,
	.sync_obj_wait = &radeon_sync_obj_wait,
	.sync_obj_flush = &radeon_sync_obj_flush,
	.sync_obj_unref = &radeon_sync_obj_unref,
	.sync_obj_ref = &radeon_sync_obj_ref,
	.move_notify = &radeon_bo_move_notify,
	.fault_reserve_notify = &radeon_bo_fault_reserve_notify,
	.io_mem_reserve = &radeon_ttm_io_mem_reserve,
	.io_mem_free = &radeon_ttm_io_mem_free,
};

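/*
 * Bring up TTM for this device: global state, the bo_device itself, the
 * VRAM and GTT pools, a small pinned VRAM buffer (stollen_vga_memory)
 * and the debugfs files.
 */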
int radeon_ttm_init(struct radeon_device *rdev)
{
	int r;

	r = radeon_ttm_global_init(rdev);
	if (r) {
		return r;
	}
	/* No other user of the address space, so set it to 0 */
	r = ttm_bo_device_init(&rdev->mman.bdev,
			       rdev->mman.bo_global_ref.ref.object,
			       &radeon_bo_driver, DRM_FILE_PAGE_OFFSET,
			       rdev->need_dma32);
	if (r) {
		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
		return r;
	}
	rdev->mman.initialized = true;
	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
			   rdev->mc.real_vram_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing VRAM heap.\n");
		return r;
	}
	r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM,
			     NULL, &rdev->stollen_vga_memory);
	if (r) {
		return r;
	}
	r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
	if (r)
		return r;
	r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
	radeon_bo_unreserve(rdev->stollen_vga_memory);
	if (r) {
		radeon_bo_unref(&rdev->stollen_vga_memory);
		return r;
	}
	DRM_INFO("radeon: %uM of VRAM memory ready\n",
		 (unsigned)rdev->mc.real_vram_size / (1024 * 1024));
	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
			   rdev->mc.gtt_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing GTT heap.\n");
		return r;
	}
	DRM_INFO("radeon: %uM of GTT memory ready.\n",
		 (unsigned)(rdev->mc.gtt_size / (1024 * 1024)));
	rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;

	r = radeon_ttm_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("Failed to init debugfs\n");
		return r;
	}
	return 0;
}

void radeon_ttm_fini(struct radeon_device *rdev)
{
	int r;

	if (!rdev->mman.initialized)
		return;
	if (rdev->stollen_vga_memory) {
		r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
		if (r == 0) {
			radeon_bo_unpin(rdev->stollen_vga_memory);
			radeon_bo_unreserve(rdev->stollen_vga_memory);
		}
		radeon_bo_unref(&rdev->stollen_vga_memory);
	}
	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
	ttm_bo_device_release(&rdev->mman.bdev);
	radeon_gart_fini(rdev);
	radeon_ttm_global_fini(rdev);
	rdev->mman.initialized = false;
	DRM_INFO("radeon: ttm finalized\n");
}

/* this should only be called at bootup or when userspace
 * isn't running */
void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
{
	struct ttm_mem_type_manager *man;

	if (!rdev->mman.initialized)
		return;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];
	/* this just adjusts TTM size idea, which sets lpfn to the correct value */
	man->size = size >> PAGE_SHIFT;
}

static struct vm_operations_struct radeon_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops = NULL;

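/*
 * Wrap TTM's fault handler so every CPU fault on a radeon mapping is
 * serialized against memory-clock reclocking via pm.mclk_lock.
 */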
static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo;
	struct radeon_device *rdev;
	int r;

	bo = (struct ttm_buffer_object *)vma->vm_private_data;
	if (bo == NULL) {
		return VM_FAULT_NOPAGE;
	}
	rdev = radeon_get_rdev(bo->bdev);
	down_read(&rdev->pm.mclk_lock);
	r = ttm_vm_ops->fault(vma, vmf);
	up_read(&rdev->pm.mclk_lock);
	return r;
}

int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct radeon_device *rdev;
	int r;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
		return drm_mmap(filp, vma);
	}

	file_priv = filp->private_data;
	rdev = file_priv->minor->dev->dev_private;
	if (rdev == NULL) {
		return -EINVAL;
	}
	r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
	if (unlikely(r != 0)) {
		return r;
	}
	if (unlikely(ttm_vm_ops == NULL)) {
		ttm_vm_ops = vma->vm_ops;
		radeon_ttm_vm_ops = *ttm_vm_ops;
		radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
	}
	vma->vm_ops = &radeon_ttm_vm_ops;
	return 0;
}


#define RADEON_DEBUGFS_MEM_TYPES 2

#if defined(CONFIG_DEBUG_FS)
static int radeon_mm_dump_table(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_mm *mm = (struct drm_mm *)node->info_ent->data;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int ret;
	struct ttm_bo_global *glob = rdev->mman.bdev.glob;

	spin_lock(&glob->lru_lock);
	ret = drm_mm_dump_table(m, mm);
	spin_unlock(&glob->lru_lock);
	return ret;
}
#endif

static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2];
	static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+2][32];
	unsigned i;

	for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
		if (i == 0)
			sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
		else
			sprintf(radeon_mem_types_names[i], "radeon_gtt_mm");
		radeon_mem_types_list[i].name = radeon_mem_types_names[i];
		radeon_mem_types_list[i].show = &radeon_mm_dump_table;
		radeon_mem_types_list[i].driver_features = 0;
		if (i == 0)
			radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
		else
			radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;

	}
	/* Add ttm page pool to debugfs */
	sprintf(radeon_mem_types_names[i], "ttm_page_pool");
	radeon_mem_types_list[i].name = radeon_mem_types_names[i];
	radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
	radeon_mem_types_list[i].driver_features = 0;
	radeon_mem_types_list[i++].data = NULL;
#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		sprintf(radeon_mem_types_names[i], "ttm_dma_page_pool");
		radeon_mem_types_list[i].name = radeon_mem_types_names[i];
		radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
		radeon_mem_types_list[i].driver_features = 0;
		radeon_mem_types_list[i++].data = NULL;
	}
#endif
	return radeon_debugfs_add_files(rdev, radeon_mem_types_list, i);

#endif
	return 0;
}