/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <ttm/ttm_page_alloc.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/swiotlb.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include "radeon_reg.h"
#include "radeon.h"

#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)

static int radeon_ttm_debugfs_init(struct radeon_device *rdev);
static void radeon_ttm_debugfs_fini(struct radeon_device *rdev);

static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
{
	struct radeon_mman *mman;
	struct radeon_device *rdev;

	mman = container_of(bdev, struct radeon_mman, bdev);
	rdev = container_of(mman, struct radeon_device, mman);
	return rdev;
}

/*
 * Global memory.
 */
static int radeon_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void radeon_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

static int radeon_ttm_global_init(struct radeon_device *rdev)
{
	struct drm_global_reference *global_ref;
	int r;

	rdev->mman.mem_global_referenced = false;
	global_ref = &rdev->mman.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &radeon_ttm_mem_global_init;
	global_ref->release = &radeon_ttm_mem_global_release;
	r = drm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM memory accounting "
			  "subsystem.\n");
		return r;
	}

	rdev->mman.bo_global_ref.mem_glob =
		rdev->mman.mem_global_ref.object;
	global_ref = &rdev->mman.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;
	r = drm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
		drm_global_item_unref(&rdev->mman.mem_global_ref);
		return r;
	}

	rdev->mman.mem_global_referenced = true;
	return 0;
}

static void radeon_ttm_global_fini(struct radeon_device *rdev)
{
	if (rdev->mman.mem_global_referenced) {
		drm_global_item_unref(&rdev->mman.bo_global_ref.ref);
		drm_global_item_unref(&rdev->mman.mem_global_ref);
		rdev->mman.mem_global_referenced = false;
	}
}

static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}

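/*
 * Describe each memory domain to TTM.  System memory stays cached;
 * GTT and VRAM are handled by ttm_bo_manager_func and default to
 * write-combined mappings.  On AGP the aperture is only CPU-mappable
 * when the bridge does not set cant_use_aperture.
 */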
static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				struct ttm_mem_type_manager *man)
{
	struct radeon_device *rdev;

	rdev = radeon_get_rdev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_TT:
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = rdev->mc.gtt_start;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
#if __OS_HAS_AGP
		if (rdev->flags & RADEON_IS_AGP) {
			if (!rdev->ddev->agp) {
				DRM_ERROR("AGP is not enabled for memory type %u\n",
					  (unsigned)type);
				return -EINVAL;
			}
			if (!rdev->ddev->agp->cant_use_aperture)
				man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
		}
#endif
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = rdev->mc.vram_start;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

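/*
 * Choose where a buffer should go when it is evicted.  VRAM buffers
 * move to GTT (or directly to the CPU domain while the GFX ring is
 * down); everything else goes to the CPU domain.  BOs that are not
 * radeon BOs get a plain system-memory placement.
 */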
static void radeon_evict_flags(struct ttm_buffer_object *bo,
				struct ttm_placement *placement)
{
	static struct ttm_place placements = {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
	};

	struct radeon_bo *rbo;

	if (!radeon_ttm_bo_is_radeon_bo(bo)) {
		placement->placement = &placements;
		placement->busy_placement = &placements;
		placement->num_placement = 1;
		placement->num_busy_placement = 1;
		return;
	}
	rbo = container_of(bo, struct radeon_bo, tbo);
	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		if (rbo->rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready == false)
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
		else
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
		break;
	case TTM_PL_TT:
	default:
		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
	}
	*placement = rbo->placement;
}

static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);

	return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
}

static void radeon_move_null(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	BUG_ON(old_mem->mm_node != NULL);
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
}

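/*
 * Copy a buffer with the GPU.  Both placements are translated into MC
 * addresses, the copy is submitted on the ring picked by
 * radeon_copy_ring_index(), and the resulting fence is handed to
 * ttm_bo_move_accel_cleanup() to complete the move.
 */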
static int radeon_move_blit(struct ttm_buffer_object *bo,
			bool evict, bool no_wait_gpu,
			struct ttm_mem_reg *new_mem,
			struct ttm_mem_reg *old_mem)
{
	struct radeon_device *rdev;
	uint64_t old_start, new_start;
	struct radeon_fence *fence;
	unsigned num_pages;
	int r, ridx;

	rdev = radeon_get_rdev(bo->bdev);
	ridx = radeon_copy_ring_index(rdev);
	old_start = old_mem->start << PAGE_SHIFT;
	new_start = new_mem->start << PAGE_SHIFT;

	switch (old_mem->mem_type) {
	case TTM_PL_VRAM:
		old_start += rdev->mc.vram_start;
		break;
	case TTM_PL_TT:
		old_start += rdev->mc.gtt_start;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
		return -EINVAL;
	}
	switch (new_mem->mem_type) {
	case TTM_PL_VRAM:
		new_start += rdev->mc.vram_start;
		break;
	case TTM_PL_TT:
		new_start += rdev->mc.gtt_start;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", new_mem->mem_type);
		return -EINVAL;
	}
	if (!rdev->ring[ridx].ready) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);

	num_pages = new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
	fence = radeon_copy(rdev, old_start, new_start, num_pages, bo->resv);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	r = ttm_bo_move_accel_cleanup(bo, &fence->base,
				      evict, no_wait_gpu, new_mem);
	radeon_fence_unref(&fence);
	return r;
}

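/*
 * VRAM to system moves are done in two steps: blit into a temporary
 * GTT placement that both the GPU and the CPU can reach, then let
 * ttm_bo_move_ttm() finish the move into system memory.
 */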
static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible,
				bool no_wait_gpu,
				struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_place placements;
	struct ttm_placement placement;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = 0;
	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
			     interruptible, no_wait_gpu);
	if (unlikely(r)) {
		return r;
	}

	r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
	if (unlikely(r)) {
		goto out_cleanup;
	}

	r = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = radeon_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}

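/*
 * System to VRAM is the mirror image: bind the pages into a temporary
 * GTT placement first, then blit from there into VRAM.
 */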
static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible,
				bool no_wait_gpu,
				struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_placement placement;
	struct ttm_place placements;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = 0;
	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
			     interruptible, no_wait_gpu);
	if (unlikely(r)) {
		return r;
	}
	r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = radeon_move_blit(bo, true, no_wait_gpu, new_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}

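/*
 * Top level move hook.  Moves that need no copy (no backing store yet,
 * or GTT<->system where rebinding the GART is enough) are handled
 * inline; otherwise a GPU blit is tried, with ttm_bo_move_memcpy() as
 * the fallback when no copy ring is ready or the blit fails.
 */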
static int radeon_bo_move(struct ttm_buffer_object *bo,
			bool evict, bool interruptible,
			bool no_wait_gpu,
			struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		radeon_move_null(bo, new_mem);
		return 0;
	}
	if ((old_mem->mem_type == TTM_PL_TT &&
	     new_mem->mem_type == TTM_PL_SYSTEM) ||
	    (old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_TT)) {
		/* bind is enough */
		radeon_move_null(bo, new_mem);
		return 0;
	}
	if (!rdev->ring[radeon_copy_ring_index(rdev)].ready ||
	    rdev->asic->copy.copy == NULL) {
		/* use memcpy */
		goto memcpy;
	}

	if (old_mem->mem_type == TTM_PL_VRAM &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		r = radeon_move_vram_ram(bo, evict, interruptible,
					no_wait_gpu, new_mem);
	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
		   new_mem->mem_type == TTM_PL_VRAM) {
		r = radeon_move_ram_vram(bo, evict, interruptible,
					no_wait_gpu, new_mem);
	} else {
		r = radeon_move_blit(bo, evict, no_wait_gpu, new_mem, old_mem);
	}

	if (r) {
memcpy:
		r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
		if (r) {
			return r;
		}
	}

	/* update statistics */
	atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &rdev->num_bytes_moved);
	return 0;
}

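/*
 * Fill in the bus address, offset and size TTM needs to ioremap a
 * placement: the AGP aperture for GTT, the PCI aperture for VRAM.
 * VRAM mappings must lie within the CPU visible part of VRAM.
 */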
static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct radeon_device *rdev = radeon_get_rdev(bdev);

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_TT:
#if __OS_HAS_AGP
		if (rdev->flags & RADEON_IS_AGP) {
			/* RADEON_IS_AGP is set only if AGP is active */
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = rdev->mc.agp_base;
			mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture;
		}
#endif
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		/* check if it's visible */
		if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size)
			return -EINVAL;
		mem->bus.base = rdev->mc.aper_base;
		mem->bus.is_iomem = true;
#ifdef __alpha__
		/*
		 * Alpha: use bus.addr to hold the ioremap() return,
		 * so we can modify bus.base below.
		 */
		if (mem->placement & TTM_PL_FLAG_WC)
			mem->bus.addr =
				ioremap_wc(mem->bus.base + mem->bus.offset,
					   mem->bus.size);
		else
			mem->bus.addr =
				ioremap_nocache(mem->bus.base + mem->bus.offset,
						mem->bus.size);

		/*
		 * Alpha: Use just the bus offset plus
		 * the hose/domain memory base for bus.base.
		 * It then can be used to build PTEs for VRAM
		 * access, as done in ttm_bo_vm_fault().
		 */
		mem->bus.base = (mem->bus.base & 0x0ffffffffUL) +
			rdev->ddev->hose->dense_mem_base;
#endif
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

/*
 * TTM backend functions.
 */
struct radeon_ttm_tt {
	struct ttm_dma_tt		ttm;
	struct radeon_device		*rdev;
	u64				offset;

	uint64_t			userptr;
	struct mm_struct		*usermm;
	uint32_t			userflags;
};

/* prepare the sg table with the user pages */
static int radeon_ttm_tt_pin_userptr(struct ttm_tt *ttm)
{
	struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
	struct radeon_ttm_tt *gtt = (void *)ttm;
	unsigned pinned = 0, nents;
	int r;

	int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	if (current->mm != gtt->usermm)
		return -EPERM;

	if (gtt->userflags & RADEON_GEM_USERPTR_ANONONLY) {
		/* check that we only pin down anonymous memory
		   to prevent problems with writeback */
		unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
		struct vm_area_struct *vma;
		vma = find_vma(gtt->usermm, gtt->userptr);
		if (!vma || vma->vm_file || vma->vm_end < end)
			return -EPERM;
	}

	do {
		unsigned num_pages = ttm->num_pages - pinned;
		uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
		struct page **pages = ttm->pages + pinned;

		r = get_user_pages(current, current->mm, userptr, num_pages,
				   write, 0, pages, NULL);
		if (r < 0)
			goto release_pages;

		pinned += r;

	} while (pinned < ttm->num_pages);

	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
				      ttm->num_pages << PAGE_SHIFT,
				      GFP_KERNEL);
	if (r)
		goto release_sg;

	r = -ENOMEM;
	nents = dma_map_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
	if (nents != ttm->sg->nents)
		goto release_sg;

	drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
					 gtt->ttm.dma_address, ttm->num_pages);

	return 0;

release_sg:
	kfree(ttm->sg);

release_pages:
	release_pages(ttm->pages, pinned, 0);
	return r;
}

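/* unmap the pages again, dirty them if we wrote, and release them */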
static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
{
	struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
	struct radeon_ttm_tt *gtt = (void *)ttm;
	struct scatterlist *sg;
	int i;

	int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	/* free the sg table and pages again */
	dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);

	for_each_sg(ttm->sg->sgl, sg, ttm->sg->nents, i) {
		struct page *page = sg_page(sg);

		if (!(gtt->userflags & RADEON_GEM_USERPTR_READONLY))
			set_page_dirty(page);

		mark_page_accessed(page);
		page_cache_release(page);
	}

	sg_free_table(ttm->sg);
}

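/*
 * Bind the backing pages into the GART at the offset given by bo_mem.
 * Userptr pages are pinned here first and lose GART write permission;
 * cached ttms get the snoop bit so the GPU stays coherent with the CPU.
 */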
static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
				   struct ttm_mem_reg *bo_mem)
{
	struct radeon_ttm_tt *gtt = (void *)ttm;
	uint32_t flags = RADEON_GART_PAGE_VALID | RADEON_GART_PAGE_READ |
		RADEON_GART_PAGE_WRITE;
	int r;

	if (gtt->userptr) {
		r = radeon_ttm_tt_pin_userptr(ttm);
		if (r)
			return r;
		flags &= ~RADEON_GART_PAGE_WRITE;
	}

	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
	if (!ttm->num_pages) {
		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
		     ttm->num_pages, bo_mem, ttm);
	}
	if (ttm->caching_state == tt_cached)
		flags |= RADEON_GART_PAGE_SNOOP;
	r = radeon_gart_bind(gtt->rdev, gtt->offset, ttm->num_pages,
			     ttm->pages, gtt->ttm.dma_address, flags);
	if (r) {
		DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
			  ttm->num_pages, (unsigned)gtt->offset);
		return r;
	}
	return 0;
}

static int radeon_ttm_backend_unbind(struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = (void *)ttm;

	radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages);

	if (gtt->userptr)
		radeon_ttm_tt_unpin_userptr(ttm);

	return 0;
}

static void radeon_ttm_backend_destroy(struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = (void *)ttm;

	ttm_dma_tt_fini(&gtt->ttm);
	kfree(gtt);
}

static struct ttm_backend_func radeon_backend_func = {
	.bind = &radeon_ttm_backend_bind,
	.unbind = &radeon_ttm_backend_unbind,
	.destroy = &radeon_ttm_backend_destroy,
};

static struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
				    unsigned long size, uint32_t page_flags,
				    struct page *dummy_read_page)
{
	struct radeon_device *rdev;
	struct radeon_ttm_tt *gtt;

	rdev = radeon_get_rdev(bdev);
#if __OS_HAS_AGP
	if (rdev->flags & RADEON_IS_AGP) {
		return ttm_agp_tt_create(bdev, rdev->ddev->agp->bridge,
					 size, page_flags, dummy_read_page);
	}
#endif

	gtt = kzalloc(sizeof(struct radeon_ttm_tt), GFP_KERNEL);
	if (gtt == NULL) {
		return NULL;
	}
	gtt->ttm.ttm.func = &radeon_backend_func;
	gtt->rdev = rdev;
	if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
		kfree(gtt);
		return NULL;
	}
	return &gtt->ttm.ttm;
}

static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct ttm_tt *ttm)
{
	if (!ttm || ttm->func != &radeon_backend_func)
		return NULL;
	return (struct radeon_ttm_tt *)ttm;
}

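/*
 * Allocate and DMA-map the backing pages.  Userptr and dma-buf (SG)
 * ttms only set up their sg table here; everything else is fed from
 * the AGP backend, the swiotlb-aware DMA pool or the plain TTM page
 * pool, with a per-page PCI mapping as the last resort.
 */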
static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
	struct radeon_device *rdev;
	unsigned i;
	int r;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (ttm->state != tt_unpopulated)
		return 0;

	if (gtt && gtt->userptr) {
		ttm->sg = kcalloc(1, sizeof(struct sg_table), GFP_KERNEL);
		if (!ttm->sg)
			return -ENOMEM;

		ttm->page_flags |= TTM_PAGE_FLAG_SG;
		ttm->state = tt_unbound;
		return 0;
	}

	if (slave && ttm->sg) {
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 gtt->ttm.dma_address, ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}

	rdev = radeon_get_rdev(ttm->bdev);
#if __OS_HAS_AGP
	if (rdev->flags & RADEON_IS_AGP) {
		return ttm_agp_tt_populate(ttm);
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate(&gtt->ttm, rdev->dev);
	}
#endif

	r = ttm_pool_populate(ttm);
	if (r) {
		return r;
	}

	for (i = 0; i < ttm->num_pages; i++) {
		gtt->ttm.dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i],
						       0, PAGE_SIZE,
						       PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
			while (i--) {
				pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
				gtt->ttm.dma_address[i] = 0;
			}
			ttm_pool_unpopulate(ttm);
			return -EFAULT;
		}
	}
	return 0;
}

static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct radeon_device *rdev;
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
	unsigned i;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (gtt && gtt->userptr) {
		kfree(ttm->sg);
		ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
		return;
	}

	if (slave)
		return;

	rdev = radeon_get_rdev(ttm->bdev);
#if __OS_HAS_AGP
	if (rdev->flags & RADEON_IS_AGP) {
		ttm_agp_tt_unpopulate(ttm);
		return;
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate(&gtt->ttm, rdev->dev);
		return;
	}
#endif

	for (i = 0; i < ttm->num_pages; i++) {
		if (gtt->ttm.dma_address[i]) {
			pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		}
	}

	ttm_pool_unpopulate(ttm);
}

int radeon_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
			      uint32_t flags)
{
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);

	if (gtt == NULL)
		return -EINVAL;

	gtt->userptr = addr;
	gtt->usermm = current->mm;
	gtt->userflags = flags;
	return 0;
}

bool radeon_ttm_tt_has_userptr(struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);

	if (gtt == NULL)
		return false;

	return !!gtt->userptr;
}

bool radeon_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);

	if (gtt == NULL)
		return false;

	return !!(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
}

static struct ttm_bo_driver radeon_bo_driver = {
	.ttm_tt_create = &radeon_ttm_tt_create,
	.ttm_tt_populate = &radeon_ttm_tt_populate,
	.ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
	.invalidate_caches = &radeon_invalidate_caches,
	.init_mem_type = &radeon_init_mem_type,
	.evict_flags = &radeon_evict_flags,
	.move = &radeon_bo_move,
	.verify_access = &radeon_verify_access,
	.move_notify = &radeon_bo_move_notify,
	.fault_reserve_notify = &radeon_bo_fault_reserve_notify,
	.io_mem_reserve = &radeon_ttm_io_mem_reserve,
	.io_mem_free = &radeon_ttm_io_mem_free,
};

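/*
 * Bring up TTM for this device: the global state, the bo_device, the
 * VRAM and GTT heaps sized from the MC, a 256KB pinned VRAM buffer for
 * the stolen VGA memory and the debugfs files.
 */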
int radeon_ttm_init(struct radeon_device *rdev)
{
	int r;

	r = radeon_ttm_global_init(rdev);
	if (r) {
		return r;
	}
	/* No other users of the address space, so set it to 0 */
	r = ttm_bo_device_init(&rdev->mman.bdev,
			       rdev->mman.bo_global_ref.ref.object,
			       &radeon_bo_driver,
			       rdev->ddev->anon_inode->i_mapping,
			       DRM_FILE_PAGE_OFFSET,
			       rdev->need_dma32);
	if (r) {
		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
		return r;
	}
	rdev->mman.initialized = true;
	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
			   rdev->mc.real_vram_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing VRAM heap.\n");
		return r;
	}
	/* Change the size here instead of the init above so only lpfn is affected */
	radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);

	r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM, 0,
			     NULL, &rdev->stollen_vga_memory);
	if (r) {
		return r;
	}
	r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
	if (r)
		return r;
	r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
	radeon_bo_unreserve(rdev->stollen_vga_memory);
	if (r) {
		radeon_bo_unref(&rdev->stollen_vga_memory);
		return r;
	}
	DRM_INFO("radeon: %uM of VRAM memory ready\n",
		 (unsigned)(rdev->mc.real_vram_size / (1024 * 1024)));
	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
			   rdev->mc.gtt_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing GTT heap.\n");
		return r;
	}
	DRM_INFO("radeon: %uM of GTT memory ready.\n",
		 (unsigned)(rdev->mc.gtt_size / (1024 * 1024)));

	r = radeon_ttm_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("Failed to init debugfs\n");
		return r;
	}
	return 0;
}

void radeon_ttm_fini(struct radeon_device *rdev)
{
	int r;

	if (!rdev->mman.initialized)
		return;
	radeon_ttm_debugfs_fini(rdev);
	if (rdev->stollen_vga_memory) {
		r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
		if (r == 0) {
			radeon_bo_unpin(rdev->stollen_vga_memory);
			radeon_bo_unreserve(rdev->stollen_vga_memory);
		}
		radeon_bo_unref(&rdev->stollen_vga_memory);
	}
	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
	ttm_bo_device_release(&rdev->mman.bdev);
	radeon_gart_fini(rdev);
	radeon_ttm_global_fini(rdev);
	rdev->mman.initialized = false;
	DRM_INFO("radeon: ttm finalized\n");
}

/* this should only be called at bootup or when userspace
 * isn't running */
void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
{
	struct ttm_mem_type_manager *man;

	if (!rdev->mman.initialized)
		return;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];
	/* this just adjusts TTM size idea, which sets lpfn to the correct value */
	man->size = size >> PAGE_SHIFT;
}

static struct vm_operations_struct radeon_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops = NULL;

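/*
 * Wrap TTM's fault handler so that page faults on radeon BOs are
 * serialized against memory reclocking via pm.mclk_lock.
 */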
static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo;
	struct radeon_device *rdev;
	int r;

	bo = (struct ttm_buffer_object *)vma->vm_private_data;
	if (bo == NULL) {
		return VM_FAULT_NOPAGE;
	}
	rdev = radeon_get_rdev(bo->bdev);
	down_read(&rdev->pm.mclk_lock);
	r = ttm_vm_ops->fault(vma, vmf);
	up_read(&rdev->pm.mclk_lock);
	return r;
}

int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct radeon_device *rdev;
	int r;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
		return -EINVAL;
	}

	file_priv = filp->private_data;
	rdev = file_priv->minor->dev->dev_private;
	if (rdev == NULL) {
		return -EINVAL;
	}
	r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
	if (unlikely(r != 0)) {
		return r;
	}
	if (unlikely(ttm_vm_ops == NULL)) {
		ttm_vm_ops = vma->vm_ops;
		radeon_ttm_vm_ops = *ttm_vm_ops;
		radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
	}
	vma->vm_ops = &radeon_ttm_vm_ops;
	return 0;
}

#if defined(CONFIG_DEBUG_FS)

static int radeon_mm_dump_table(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	unsigned ttm_pl = *(int *)node->info_ent->data;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct drm_mm *mm = (struct drm_mm *)rdev->mman.bdev.man[ttm_pl].priv;
	int ret;
	struct ttm_bo_global *glob = rdev->mman.bdev.glob;

	spin_lock(&glob->lru_lock);
	ret = drm_mm_dump_table(m, mm);
	spin_unlock(&glob->lru_lock);
	return ret;
}

static int ttm_pl_vram = TTM_PL_VRAM;
static int ttm_pl_tt = TTM_PL_TT;

static struct drm_info_list radeon_ttm_debugfs_list[] = {
	{"radeon_vram_mm", radeon_mm_dump_table, 0, &ttm_pl_vram},
	{"radeon_gtt_mm", radeon_mm_dump_table, 0, &ttm_pl_tt},
	{"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
#ifdef CONFIG_SWIOTLB
	{"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
#endif
};

static int radeon_ttm_vram_open(struct inode *inode, struct file *filep)
{
	struct radeon_device *rdev = inode->i_private;
	i_size_write(inode, rdev->mc.mc_vram_size);
	filep->private_data = inode->i_private;
	return 0;
}

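/*
 * Read VRAM word by word through the MM_INDEX/MM_DATA window; the
 * upper bits of large offsets go through EVERGREEN_MM_INDEX_HI.
 */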
static ssize_t radeon_ttm_vram_read(struct file *f, char __user *buf,
				    size_t size, loff_t *pos)
{
	struct radeon_device *rdev = f->private_data;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		unsigned long flags;
		uint32_t value;

		if (*pos >= rdev->mc.mc_vram_size)
			return result;

		spin_lock_irqsave(&rdev->mmio_idx_lock, flags);
		WREG32(RADEON_MM_INDEX, ((uint32_t)*pos) | 0x80000000);
		if (rdev->family >= CHIP_CEDAR)
			WREG32(EVERGREEN_MM_INDEX_HI, *pos >> 31);
		value = RREG32(RADEON_MM_DATA);
		spin_unlock_irqrestore(&rdev->mmio_idx_lock, flags);

		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static const struct file_operations radeon_ttm_vram_fops = {
	.owner = THIS_MODULE,
	.open = radeon_ttm_vram_open,
	.read = radeon_ttm_vram_read,
	.llseek = default_llseek
};

static int radeon_ttm_gtt_open(struct inode *inode, struct file *filep)
{
	struct radeon_device *rdev = inode->i_private;
	i_size_write(inode, rdev->mc.gtt_size);
	filep->private_data = inode->i_private;
	return 0;
}

static ssize_t radeon_ttm_gtt_read(struct file *f, char __user *buf,
				   size_t size, loff_t *pos)
{
	struct radeon_device *rdev = f->private_data;
	ssize_t result = 0;
	int r;

	while (size) {
		loff_t p = *pos / PAGE_SIZE;
		unsigned off = *pos & ~PAGE_MASK;
		size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
		struct page *page;
		void *ptr;

		if (p >= rdev->gart.num_cpu_pages)
			return result;

		page = rdev->gart.pages[p];
		if (page) {
			ptr = kmap(page);
			ptr += off;

			r = copy_to_user(buf, ptr, cur_size);
			kunmap(rdev->gart.pages[p]);
		} else
			r = clear_user(buf, cur_size);

		if (r)
			return -EFAULT;

		result += cur_size;
		buf += cur_size;
		*pos += cur_size;
		size -= cur_size;
	}

	return result;
}

static const struct file_operations radeon_ttm_gtt_fops = {
	.owner = THIS_MODULE,
	.open = radeon_ttm_gtt_open,
	.read = radeon_ttm_gtt_read,
	.llseek = default_llseek
};

#endif

static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned count;

	struct drm_minor *minor = rdev->ddev->primary;
	struct dentry *ent, *root = minor->debugfs_root;

	ent = debugfs_create_file("radeon_vram", S_IFREG | S_IRUGO, root,
				  rdev, &radeon_ttm_vram_fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);
	rdev->mman.vram = ent;

	ent = debugfs_create_file("radeon_gtt", S_IFREG | S_IRUGO, root,
				  rdev, &radeon_ttm_gtt_fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);
	rdev->mman.gtt = ent;

	count = ARRAY_SIZE(radeon_ttm_debugfs_list);

#ifdef CONFIG_SWIOTLB
	if (!swiotlb_nr_tbl())
		--count;
#endif

	return radeon_debugfs_add_files(rdev, radeon_ttm_debugfs_list, count);
#else

	return 0;
#endif
}

static void radeon_ttm_debugfs_fini(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)

	debugfs_remove(rdev->mman.vram);
	rdev->mman.vram = NULL;

	debugfs_remove(rdev->mman.gtt);
	rdev->mman.gtt = NULL;
#endif
}