/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <ttm/ttm_page_alloc.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/swiotlb.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include "amdgpu.h"
#include "bif/bif_4_1_d.h"
47 | ||
48 | #define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT) | |
49 | ||
50 | static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev); | |
51 | static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev); | |
52 | ||
53 | static struct amdgpu_device *amdgpu_get_adev(struct ttm_bo_device *bdev) | |
54 | { | |
55 | struct amdgpu_mman *mman; | |
56 | struct amdgpu_device *adev; | |
57 | ||
58 | mman = container_of(bdev, struct amdgpu_mman, bdev); | |
59 | adev = container_of(mman, struct amdgpu_device, mman); | |
60 | return adev; | |
61 | } | |
62 | ||
63 | ||
64 | /* | |
65 | * Global memory. | |
66 | */ | |
67 | static int amdgpu_ttm_mem_global_init(struct drm_global_reference *ref) | |
68 | { | |
69 | return ttm_mem_global_init(ref->object); | |
70 | } | |
71 | ||
72 | static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref) | |
73 | { | |
74 | ttm_mem_global_release(ref->object); | |
75 | } | |
76 | ||
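/*
 * Set up the TTM memory accounting and buffer object globals (shared
 * across devices via drm_global) and create the kernel-priority
 * scheduler entity on the buffer move ring that will run TTM's
 * buffer moves.
 */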
static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
{
	struct drm_global_reference *global_ref;
	struct amdgpu_ring *ring;
	struct amd_sched_rq *rq;
	int r;

	adev->mman.mem_global_referenced = false;
	global_ref = &adev->mman.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &amdgpu_ttm_mem_global_init;
	global_ref->release = &amdgpu_ttm_mem_global_release;
	r = drm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM memory accounting "
			  "subsystem.\n");
		return r;
	}

	adev->mman.bo_global_ref.mem_glob =
		adev->mman.mem_global_ref.object;
	global_ref = &adev->mman.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;
	r = drm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
		drm_global_item_unref(&adev->mman.mem_global_ref);
		return r;
	}

	ring = adev->mman.buffer_funcs_ring;
	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
	r = amd_sched_entity_init(&ring->sched, &adev->mman.entity,
				  rq, amdgpu_sched_jobs);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM BO move run queue.\n");
		drm_global_item_unref(&adev->mman.mem_global_ref);
		drm_global_item_unref(&adev->mman.bo_global_ref.ref);
		return r;
	}

	adev->mman.mem_global_referenced = true;

	return 0;
}
126 | ||
127 | static void amdgpu_ttm_global_fini(struct amdgpu_device *adev) | |
128 | { | |
129 | if (adev->mman.mem_global_referenced) { | |
703297c1 CK |
130 | amd_sched_entity_fini(adev->mman.entity.sched, |
131 | &adev->mman.entity); | |
d38ceaf9 AD |
132 | drm_global_item_unref(&adev->mman.bo_global_ref.ref); |
133 | drm_global_item_unref(&adev->mman.mem_global_ref); | |
134 | adev->mman.mem_global_referenced = false; | |
135 | } | |
136 | } | |
137 | ||
138 | static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) | |
139 | { | |
140 | return 0; | |
141 | } | |
142 | ||
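/*
 * Describe each memory domain to TTM: where it starts in the GPU
 * address space, which caching attributes it supports and whether
 * the CPU can map it.
 */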
static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				struct ttm_mem_type_manager *man)
{
	struct amdgpu_device *adev;

	adev = amdgpu_get_adev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_TT:
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = adev->mc.gtt_start;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = adev->mc.vram_start;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	case AMDGPU_PL_GDS:
	case AMDGPU_PL_GWS:
	case AMDGPU_PL_OA:
		/* On-chip GDS memory */
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = 0;
		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_CMA;
		man->available_caching = TTM_PL_FLAG_UNCACHED;
		man->default_caching = TTM_PL_FLAG_UNCACHED;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

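/*
 * Pick where an evicted buffer should go: VRAM contents move to GTT
 * while the buffer move ring is up (so the blit path can do the
 * copy); everything else falls back to CPU-accessible system memory.
 */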
static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
			       struct ttm_placement *placement)
{
	struct amdgpu_bo *rbo;
	static struct ttm_place placements = {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
	};

	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) {
		placement->placement = &placements;
		placement->busy_placement = &placements;
		placement->num_placement = 1;
		placement->num_busy_placement = 1;
		return;
	}
	rbo = container_of(bo, struct amdgpu_bo, tbo);
	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		if (rbo->adev->mman.buffer_funcs_ring->ready == false)
			amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_CPU);
		else
			amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_GTT);
		break;
	case TTM_PL_TT:
	default:
		amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_CPU);
	}
	*placement = rbo->placement;
}

static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct amdgpu_bo *rbo = container_of(bo, struct amdgpu_bo, tbo);

	return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
}

static void amdgpu_move_null(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	BUG_ON(old_mem->mm_node != NULL);
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
}

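/*
 * Copy a buffer with the GPU copy engine. The page-based offsets
 * handed out by TTM are turned into GPU addresses by adding the
 * start of the VRAM or GTT aperture, and completion is handled via
 * the fence that ttm_bo_move_accel_cleanup() waits on.
 */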
static int amdgpu_move_blit(struct ttm_buffer_object *bo,
			    bool evict, bool no_wait_gpu,
			    struct ttm_mem_reg *new_mem,
			    struct ttm_mem_reg *old_mem)
{
	struct amdgpu_device *adev;
	struct amdgpu_ring *ring;
	uint64_t old_start, new_start;
	struct fence *fence;
	int r;

	adev = amdgpu_get_adev(bo->bdev);
	ring = adev->mman.buffer_funcs_ring;
	old_start = old_mem->start << PAGE_SHIFT;
	new_start = new_mem->start << PAGE_SHIFT;

	switch (old_mem->mem_type) {
	case TTM_PL_VRAM:
		old_start += adev->mc.vram_start;
		break;
	case TTM_PL_TT:
		old_start += adev->mc.gtt_start;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
		return -EINVAL;
	}
	switch (new_mem->mem_type) {
	case TTM_PL_VRAM:
		new_start += adev->mc.vram_start;
		break;
	case TTM_PL_TT:
		new_start += adev->mc.gtt_start;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", new_mem->mem_type);
		return -EINVAL;
	}
	if (!ring->ready) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	BUILD_BUG_ON((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) != 0);

	r = amdgpu_copy_buffer(ring, old_start, new_start,
			       new_mem->num_pages * PAGE_SIZE, /* bytes */
			       bo->resv, &fence);
	if (r)
		return r;

	r = ttm_bo_move_accel_cleanup(bo, fence,
				      evict, no_wait_gpu, new_mem);
	fence_put(fence);
	return r;
}

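/*
 * VRAM -> system moves are done in two hops: blit into a temporary
 * GTT placement first, so the copy engine can reach the pages, then
 * let TTM finish the move into system memory.
 */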
static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible,
				bool no_wait_gpu,
				struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_place placements;
	struct ttm_placement placement;
	int r;

	adev = amdgpu_get_adev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = 0;
	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
			     interruptible, no_wait_gpu);
	if (unlikely(r)) {
		return r;
	}

	r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
	if (unlikely(r)) {
		goto out_cleanup;
	}

	r = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = amdgpu_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}

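/* System -> VRAM is the mirror image: bind to GTT first, then blit. */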
static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible,
				bool no_wait_gpu,
				struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_placement placement;
	struct ttm_place placements;
	int r;

	adev = amdgpu_get_adev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = 0;
	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
			     interruptible, no_wait_gpu);
	if (unlikely(r)) {
		return r;
	}
	r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = amdgpu_move_blit(bo, true, no_wait_gpu, new_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}

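/*
 * Top-level move callback: handle the trivial cases (nothing bound
 * yet, GTT <-> system rebinds), prefer the blit path, and fall back
 * to a CPU memcpy when no copy engine is available or the blit
 * fails.
 */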
static int amdgpu_bo_move(struct ttm_buffer_object *bo,
			  bool evict, bool interruptible,
			  bool no_wait_gpu,
			  struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int r;

	adev = amdgpu_get_adev(bo->bdev);
	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		amdgpu_move_null(bo, new_mem);
		return 0;
	}
	if ((old_mem->mem_type == TTM_PL_TT &&
	     new_mem->mem_type == TTM_PL_SYSTEM) ||
	    (old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_TT)) {
		/* bind is enough */
		amdgpu_move_null(bo, new_mem);
		return 0;
	}
	if (adev->mman.buffer_funcs == NULL ||
	    adev->mman.buffer_funcs_ring == NULL ||
	    !adev->mman.buffer_funcs_ring->ready) {
		/* use memcpy */
		goto memcpy;
	}

	if (old_mem->mem_type == TTM_PL_VRAM &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		r = amdgpu_move_vram_ram(bo, evict, interruptible,
					 no_wait_gpu, new_mem);
	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
		   new_mem->mem_type == TTM_PL_VRAM) {
		r = amdgpu_move_ram_vram(bo, evict, interruptible,
					 no_wait_gpu, new_mem);
	} else {
		r = amdgpu_move_blit(bo, evict, no_wait_gpu, new_mem, old_mem);
	}

	if (r) {
memcpy:
		r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
		if (r) {
			return r;
		}
	}

	/* update statistics */
	atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
	return 0;
}

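/*
 * Tell TTM how a placement can be CPU-mapped: system and GTT pages
 * need no I/O mapping, while VRAM is reached through the PCI
 * aperture, so only its CPU-visible portion may be mapped.
 */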
static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct amdgpu_device *adev = amdgpu_get_adev(bdev);

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_TT:
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		/* check if it's visible */
		if ((mem->bus.offset + mem->bus.size) > adev->mc.visible_vram_size)
			return -EINVAL;
		mem->bus.base = adev->mc.aper_base;
		mem->bus.is_iomem = true;
#ifdef __alpha__
		/*
		 * Alpha: use bus.addr to hold the ioremap() return,
		 * so we can modify bus.base below.
		 */
		if (mem->placement & TTM_PL_FLAG_WC)
			mem->bus.addr =
				ioremap_wc(mem->bus.base + mem->bus.offset,
					   mem->bus.size);
		else
			mem->bus.addr =
				ioremap_nocache(mem->bus.base + mem->bus.offset,
						mem->bus.size);

		/*
		 * Alpha: Use just the bus offset plus
		 * the hose/domain memory base for bus.base.
		 * It then can be used to build PTEs for VRAM
		 * access, as done in ttm_bo_vm_fault().
		 */
		mem->bus.base = (mem->bus.base & 0x0ffffffffUL) +
			adev->ddev->hose->dense_mem_base;
#endif
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

/*
 * TTM backend functions.
 */
struct amdgpu_ttm_gup_task_list {
	struct list_head	list;
	struct task_struct	*task;
};

struct amdgpu_ttm_tt {
	struct ttm_dma_tt	ttm;
	struct amdgpu_device	*adev;
	u64			offset;
	uint64_t		userptr;
	struct mm_struct	*usermm;
	uint32_t		userflags;
	spinlock_t		guptasklock;
	struct list_head	guptasks;
	atomic_t		mmu_invalidations;
};

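/*
 * Pin the pages backing a userptr BO with get_user_pages(). Each
 * task doing so is tracked on gtt->guptasks, which lets
 * amdgpu_ttm_tt_affect_userptr() tell a foreign MMU invalidation
 * apart from our own pinning.
 */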
int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	unsigned pinned = 0;
	int r;

	if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
		/* check that we only use anonymous memory
		   to prevent problems with writeback */
		unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
		struct vm_area_struct *vma;

		vma = find_vma(gtt->usermm, gtt->userptr);
		if (!vma || vma->vm_file || vma->vm_end < end)
			return -EPERM;
	}

	do {
		unsigned num_pages = ttm->num_pages - pinned;
		uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
		struct page **p = pages + pinned;
		struct amdgpu_ttm_gup_task_list guptask;

		guptask.task = current;
		spin_lock(&gtt->guptasklock);
		list_add(&guptask.list, &gtt->guptasks);
		spin_unlock(&gtt->guptasklock);

		r = get_user_pages(userptr, num_pages, write, 0, p, NULL);

		spin_lock(&gtt->guptasklock);
		list_del(&guptask.list);
		spin_unlock(&gtt->guptasklock);

		if (r < 0)
			goto release_pages;

		pinned += r;

	} while (pinned < ttm->num_pages);

	return 0;

release_pages:
	release_pages(pages, pinned, 0);
	return r;
}

/* prepare the sg table with the user pages */
static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_get_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned nents;
	int r;

	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
				      ttm->num_pages << PAGE_SHIFT,
				      GFP_KERNEL);
	if (r)
		goto release_sg;

	r = -ENOMEM;
	nents = dma_map_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
	if (nents != ttm->sg->nents)
		goto release_sg;

	drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
					 gtt->ttm.dma_address, ttm->num_pages);

	return 0;

release_sg:
	kfree(ttm->sg);
	return r;
}

static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_get_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	struct sg_page_iter sg_iter;

	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	/* double check that we don't free the table twice */
	if (!ttm->sg->sgl)
		return;

	/* free the sg table and pages again */
	dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);

	for_each_sg_page(ttm->sg->sgl, &sg_iter, ttm->sg->nents, 0) {
		struct page *page = sg_page_iter_page(&sg_iter);
		if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
			set_page_dirty(page);

		mark_page_accessed(page);
		page_cache_release(page);
	}

	sg_free_table(ttm->sg);
}

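/*
 * Bind a ttm_tt to the GPU: pin the userptr pages if there are any,
 * then write GART entries for the GTT offset that TTM picked.
 */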
static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
				   struct ttm_mem_reg *bo_mem)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	uint32_t flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
	int r;

	if (gtt->userptr) {
		r = amdgpu_ttm_tt_pin_userptr(ttm);
		if (r) {
			DRM_ERROR("failed to pin userptr\n");
			return r;
		}
	}
	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
	if (!ttm->num_pages) {
		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
		     ttm->num_pages, bo_mem, ttm);
	}

	if (bo_mem->mem_type == AMDGPU_PL_GDS ||
	    bo_mem->mem_type == AMDGPU_PL_GWS ||
	    bo_mem->mem_type == AMDGPU_PL_OA)
		return -EINVAL;

	r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages,
			     ttm->pages, gtt->ttm.dma_address, flags);

	if (r) {
		DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
			  ttm->num_pages, (unsigned)gtt->offset);
		return r;
	}
	return 0;
}

static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
	if (gtt->adev->gart.ready)
		amdgpu_gart_unbind(gtt->adev, gtt->offset, ttm->num_pages);

	if (gtt->userptr)
		amdgpu_ttm_tt_unpin_userptr(ttm);

	return 0;
}

static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	ttm_dma_tt_fini(&gtt->ttm);
	kfree(gtt);
}

static struct ttm_backend_func amdgpu_backend_func = {
	.bind = &amdgpu_ttm_backend_bind,
	.unbind = &amdgpu_ttm_backend_unbind,
	.destroy = &amdgpu_ttm_backend_destroy,
};

static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev,
					   unsigned long size, uint32_t page_flags,
					   struct page *dummy_read_page)
{
	struct amdgpu_device *adev;
	struct amdgpu_ttm_tt *gtt;

	adev = amdgpu_get_adev(bdev);

	gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
	if (gtt == NULL) {
		return NULL;
	}
	gtt->ttm.ttm.func = &amdgpu_backend_func;
	gtt->adev = adev;
	if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
		kfree(gtt);
		return NULL;
	}
	return &gtt->ttm.ttm;
}

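/*
 * Allocate backing pages for a ttm_tt. userptr ttms only get an
 * empty sg_table here (their pages are pinned at bind time),
 * imported DMA-bufs reuse the exporter's sg_table, and everything
 * else is allocated from the TTM page pool and DMA-mapped page by
 * page.
 */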
static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev;
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned i;
	int r;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (ttm->state != tt_unpopulated)
		return 0;

	if (gtt && gtt->userptr) {
		ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
		if (!ttm->sg)
			return -ENOMEM;

		ttm->page_flags |= TTM_PAGE_FLAG_SG;
		ttm->state = tt_unbound;
		return 0;
	}

	if (slave && ttm->sg) {
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 gtt->ttm.dma_address, ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}

	adev = amdgpu_get_adev(ttm->bdev);

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate(&gtt->ttm, adev->dev);
	}
#endif

	r = ttm_pool_populate(ttm);
	if (r) {
		return r;
	}

	for (i = 0; i < ttm->num_pages; i++) {
		gtt->ttm.dma_address[i] = pci_map_page(adev->pdev, ttm->pages[i],
						       0, PAGE_SIZE,
						       PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(adev->pdev, gtt->ttm.dma_address[i])) {
			while (i--) {
				pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i],
					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
				gtt->ttm.dma_address[i] = 0;
			}
			ttm_pool_unpopulate(ttm);
			return -EFAULT;
		}
	}
	return 0;
}

static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev;
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned i;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (gtt && gtt->userptr) {
		kfree(ttm->sg);
		ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
		return;
	}

	if (slave)
		return;

	adev = amdgpu_get_adev(ttm->bdev);

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate(&gtt->ttm, adev->dev);
		return;
	}
#endif

	for (i = 0; i < ttm->num_pages; i++) {
		if (gtt->ttm.dma_address[i]) {
			pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		}
	}

	ttm_pool_unpopulate(ttm);
}

int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
			      uint32_t flags)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL)
		return -EINVAL;

	gtt->userptr = addr;
	gtt->usermm = current->mm;
	gtt->userflags = flags;
	spin_lock_init(&gtt->guptasklock);
	INIT_LIST_HEAD(&gtt->guptasks);
	atomic_set(&gtt->mmu_invalidations, 0);

	return 0;
}

struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL)
		return NULL;

	return gtt->usermm;
}

bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
				  unsigned long end)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	struct amdgpu_ttm_gup_task_list *entry;
	unsigned long size;

	if (gtt == NULL || !gtt->userptr)
		return false;

	size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
	if (gtt->userptr > end || gtt->userptr + size <= start)
		return false;

	spin_lock(&gtt->guptasklock);
	list_for_each_entry(entry, &gtt->guptasks, list) {
		if (entry->task == current) {
			spin_unlock(&gtt->guptasklock);
			return false;
		}
	}
	spin_unlock(&gtt->guptasklock);

	atomic_inc(&gtt->mmu_invalidations);

	return true;
}

bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
				       int *last_invalidated)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	int prev_invalidated = *last_invalidated;

	*last_invalidated = atomic_read(&gtt->mmu_invalidations);
	return prev_invalidated != *last_invalidated;
}

bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL)
		return false;

	return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
}

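/*
 * Build the GPU page table flags for a placement: VALID for anything
 * the GPU may access, SYSTEM (plus SNOOPED when the pages are CPU
 * cached) for GTT, EXECUTABLE on ASICs that support it, and
 * WRITEABLE unless the userptr was registered read-only.
 */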
uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
				 struct ttm_mem_reg *mem)
{
	uint32_t flags = 0;

	if (mem && mem->mem_type != TTM_PL_SYSTEM)
		flags |= AMDGPU_PTE_VALID;

	if (mem && mem->mem_type == TTM_PL_TT) {
		flags |= AMDGPU_PTE_SYSTEM;

		if (ttm->caching_state == tt_cached)
			flags |= AMDGPU_PTE_SNOOPED;
	}

	if (adev->asic_type >= CHIP_TONGA)
		flags |= AMDGPU_PTE_EXECUTABLE;

	flags |= AMDGPU_PTE_READABLE;

	if (!amdgpu_ttm_tt_is_readonly(ttm))
		flags |= AMDGPU_PTE_WRITEABLE;

	return flags;
}

static struct ttm_bo_driver amdgpu_bo_driver = {
	.ttm_tt_create = &amdgpu_ttm_tt_create,
	.ttm_tt_populate = &amdgpu_ttm_tt_populate,
	.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
	.invalidate_caches = &amdgpu_invalidate_caches,
	.init_mem_type = &amdgpu_init_mem_type,
	.evict_flags = &amdgpu_evict_flags,
	.move = &amdgpu_bo_move,
	.verify_access = &amdgpu_verify_access,
	.move_notify = &amdgpu_bo_move_notify,
	.fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
	.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
	.io_mem_free = &amdgpu_ttm_io_mem_free,
};

int amdgpu_ttm_init(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_ttm_global_init(adev);
	if (r) {
		return r;
	}
	/* No other user of the address space, so set the offset to 0 */
	r = ttm_bo_device_init(&adev->mman.bdev,
			       adev->mman.bo_global_ref.ref.object,
			       &amdgpu_bo_driver,
			       adev->ddev->anon_inode->i_mapping,
			       DRM_FILE_PAGE_OFFSET,
			       adev->need_dma32);
	if (r) {
		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
		return r;
	}
	adev->mman.initialized = true;
	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
			   adev->mc.real_vram_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing VRAM heap.\n");
		return r;
	}
	/* Change the size here instead of the init above so only lpfn is affected */
	amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);

	r = amdgpu_bo_create(adev, 256 * 1024, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
			     NULL, NULL, &adev->stollen_vga_memory);
	if (r) {
		return r;
	}
	r = amdgpu_bo_reserve(adev->stollen_vga_memory, false);
	if (r)
		return r;
	r = amdgpu_bo_pin(adev->stollen_vga_memory, AMDGPU_GEM_DOMAIN_VRAM, NULL);
	amdgpu_bo_unreserve(adev->stollen_vga_memory);
	if (r) {
		amdgpu_bo_unref(&adev->stollen_vga_memory);
		return r;
	}
	DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
		 (unsigned)(adev->mc.real_vram_size / (1024 * 1024)));
	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT,
			   adev->mc.gtt_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing GTT heap.\n");
		return r;
	}
	DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
		 (unsigned)(adev->mc.gtt_size / (1024 * 1024)));

	adev->gds.mem.total_size = adev->gds.mem.total_size << AMDGPU_GDS_SHIFT;
	adev->gds.mem.gfx_partition_size = adev->gds.mem.gfx_partition_size << AMDGPU_GDS_SHIFT;
	adev->gds.mem.cs_partition_size = adev->gds.mem.cs_partition_size << AMDGPU_GDS_SHIFT;
	adev->gds.gws.total_size = adev->gds.gws.total_size << AMDGPU_GWS_SHIFT;
	adev->gds.gws.gfx_partition_size = adev->gds.gws.gfx_partition_size << AMDGPU_GWS_SHIFT;
	adev->gds.gws.cs_partition_size = adev->gds.gws.cs_partition_size << AMDGPU_GWS_SHIFT;
	adev->gds.oa.total_size = adev->gds.oa.total_size << AMDGPU_OA_SHIFT;
	adev->gds.oa.gfx_partition_size = adev->gds.oa.gfx_partition_size << AMDGPU_OA_SHIFT;
	adev->gds.oa.cs_partition_size = adev->gds.oa.cs_partition_size << AMDGPU_OA_SHIFT;
	/* GDS Memory */
	r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GDS,
			   adev->gds.mem.total_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing GDS heap.\n");
		return r;
	}

	/* GWS */
	r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GWS,
			   adev->gds.gws.total_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing gws heap.\n");
		return r;
	}

	/* OA */
	r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA,
			   adev->gds.oa.total_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing oa heap.\n");
		return r;
	}

	r = amdgpu_ttm_debugfs_init(adev);
	if (r) {
		DRM_ERROR("Failed to init debugfs\n");
		return r;
	}
	return 0;
}

void amdgpu_ttm_fini(struct amdgpu_device *adev)
{
	int r;

	if (!adev->mman.initialized)
		return;
	amdgpu_ttm_debugfs_fini(adev);
	if (adev->stollen_vga_memory) {
		r = amdgpu_bo_reserve(adev->stollen_vga_memory, false);
		if (r == 0) {
			amdgpu_bo_unpin(adev->stollen_vga_memory);
			amdgpu_bo_unreserve(adev->stollen_vga_memory);
		}
		amdgpu_bo_unref(&adev->stollen_vga_memory);
	}
	ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT);
	ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GDS);
	ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS);
	ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
	ttm_bo_device_release(&adev->mman.bdev);
	amdgpu_gart_fini(adev);
	amdgpu_ttm_global_fini(adev);
	adev->mman.initialized = false;
	DRM_INFO("amdgpu: ttm finalized\n");
}

/* this should only be called at bootup or when userspace
 * isn't running */
void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size)
{
	struct ttm_mem_type_manager *man;

	if (!adev->mman.initialized)
		return;

	man = &adev->mman.bdev.man[TTM_PL_VRAM];
	/* this just adjusts TTM's idea of the size, which sets lpfn to the correct value */
	man->size = size >> PAGE_SHIFT;
}

int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct amdgpu_device *adev;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
		return -EINVAL;

	file_priv = filp->private_data;
	adev = file_priv->minor->dev->dev_private;
	if (adev == NULL)
		return -EINVAL;

	return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
}

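/*
 * Build and submit a copy job: one copy packet per copy_max_bytes
 * chunk, synchronized against the fences in @resv, with the job's
 * scheduler fence returned through @fence.
 */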
int amdgpu_copy_buffer(struct amdgpu_ring *ring,
		       uint64_t src_offset,
		       uint64_t dst_offset,
		       uint32_t byte_count,
		       struct reservation_object *resv,
		       struct fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;

	uint32_t max_bytes;
	unsigned num_loops, num_dw;
	unsigned i;
	int r;

	max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
	num_loops = DIV_ROUND_UP(byte_count, max_bytes);
	num_dw = num_loops * adev->mman.buffer_funcs->copy_num_dw;

	/* for IB padding */
	while (num_dw & 0x7)
		num_dw++;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
	if (r)
		return r;

	if (resv) {
		r = amdgpu_sync_resv(adev, &job->sync, resv,
				     AMDGPU_FENCE_OWNER_UNDEFINED);
		if (r) {
			DRM_ERROR("sync failed (%d).\n", r);
			goto error_free;
		}
	}

	for (i = 0; i < num_loops; i++) {
		uint32_t cur_size_in_bytes = min(byte_count, max_bytes);

		amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
					dst_offset, cur_size_in_bytes);

		src_offset += cur_size_in_bytes;
		dst_offset += cur_size_in_bytes;
		byte_count -= cur_size_in_bytes;
	}

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);
	r = amdgpu_job_submit(job, ring, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
	if (r)
		goto error_free;

	return 0;

error_free:
	amdgpu_job_free(job);
	return r;
}

#if defined(CONFIG_DEBUG_FS)

static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	unsigned ttm_pl = *(int *)node->info_ent->data;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_mm *mm = (struct drm_mm *)adev->mman.bdev.man[ttm_pl].priv;
	int ret;
	struct ttm_bo_global *glob = adev->mman.bdev.glob;

	spin_lock(&glob->lru_lock);
	ret = drm_mm_dump_table(m, mm);
	spin_unlock(&glob->lru_lock);
	if (ttm_pl == TTM_PL_VRAM)
		seq_printf(m, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n",
			   adev->mman.bdev.man[ttm_pl].size,
			   (u64)atomic64_read(&adev->vram_usage) >> 20,
			   (u64)atomic64_read(&adev->vram_vis_usage) >> 20);
	return ret;
}

static int ttm_pl_vram = TTM_PL_VRAM;
static int ttm_pl_tt = TTM_PL_TT;

static struct drm_info_list amdgpu_ttm_debugfs_list[] = {
	{"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, &ttm_pl_vram},
	{"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, &ttm_pl_tt},
	{"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
#ifdef CONFIG_SWIOTLB
	{"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
#endif
};

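/* Read VRAM through the MM_INDEX/MM_DATA register window, one dword at a time. */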
static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
				    size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = f->f_inode->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		unsigned long flags;
		uint32_t value;

		if (*pos >= adev->mc.mc_vram_size)
			return result;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		WREG32(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
		WREG32(mmMM_INDEX_HI, *pos >> 31);
		value = RREG32(mmMM_DATA);
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);

		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static const struct file_operations amdgpu_ttm_vram_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ttm_vram_read,
	.llseek = default_llseek
};

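/*
 * Dump GTT contents by walking the GART page array: mapped pages are
 * kmap()ed and copied out, unmapped ones read back as zeroes.
 */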
static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
				   size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = f->f_inode->i_private;
	ssize_t result = 0;
	int r;

	while (size) {
		loff_t p = *pos / PAGE_SIZE;
		unsigned off = *pos & ~PAGE_MASK;
		size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
		struct page *page;
		void *ptr;

		if (p >= adev->gart.num_cpu_pages)
			return result;

		page = adev->gart.pages[p];
		if (page) {
			ptr = kmap(page);
			ptr += off;

			r = copy_to_user(buf, ptr, cur_size);
			kunmap(adev->gart.pages[p]);
		} else
			r = clear_user(buf, cur_size);

		if (r)
			return -EFAULT;

		result += cur_size;
		buf += cur_size;
		*pos += cur_size;
		size -= cur_size;
	}

	return result;
}

static const struct file_operations amdgpu_ttm_gtt_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ttm_gtt_read,
	.llseek = default_llseek
};

#endif

static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned count;

	struct drm_minor *minor = adev->ddev->primary;
	struct dentry *ent, *root = minor->debugfs_root;

	ent = debugfs_create_file("amdgpu_vram", S_IFREG | S_IRUGO, root,
				  adev, &amdgpu_ttm_vram_fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);
	i_size_write(ent->d_inode, adev->mc.mc_vram_size);
	adev->mman.vram = ent;

	ent = debugfs_create_file("amdgpu_gtt", S_IFREG | S_IRUGO, root,
				  adev, &amdgpu_ttm_gtt_fops);
	if (IS_ERR(ent))
		return PTR_ERR(ent);
	i_size_write(ent->d_inode, adev->mc.gtt_size);
	adev->mman.gtt = ent;

	count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);

#ifdef CONFIG_SWIOTLB
	if (!swiotlb_nr_tbl())
		--count;
#endif

	return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
#else

	return 0;
#endif
}

static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)

	debugfs_remove(adev->mman.vram);
	adev->mman.vram = NULL;

	debugfs_remove(adev->mman.gtt);
	adev->mman.gtt = NULL;
#endif
}