/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <ttm/ttm_page_alloc.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/swiotlb.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include "amdgpu.h"
#include "bif/bif_4_1_d.h"

#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)

static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);

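/*
 * Resolve the owning amdgpu device from a TTM BO device: struct amdgpu_mman
 * embeds the ttm_bo_device and is itself embedded in struct amdgpu_device,
 * so two container_of() steps recover the device pointer.
 */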
static struct amdgpu_device *amdgpu_get_adev(struct ttm_bo_device *bdev)
{
	struct amdgpu_mman *mman;
	struct amdgpu_device *adev;

	mman = container_of(bdev, struct amdgpu_mman, bdev);
	adev = container_of(mman, struct amdgpu_device, mman);
	return adev;
}

/*
 * Global memory.
 */
static int amdgpu_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
{
	struct drm_global_reference *global_ref;
	struct amdgpu_ring *ring;
	struct amd_sched_rq *rq;
	int r;

	adev->mman.mem_global_referenced = false;
	global_ref = &adev->mman.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &amdgpu_ttm_mem_global_init;
	global_ref->release = &amdgpu_ttm_mem_global_release;
	r = drm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM memory accounting subsystem.\n");
		return r;
	}

	adev->mman.bo_global_ref.mem_glob =
		adev->mman.mem_global_ref.object;
	global_ref = &adev->mman.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;
	r = drm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
		drm_global_item_unref(&adev->mman.mem_global_ref);
		return r;
	}

	ring = adev->mman.buffer_funcs_ring;
	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
	r = amd_sched_entity_init(&ring->sched, &adev->mman.entity,
				  rq, amdgpu_sched_jobs);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM BO move run queue.\n");
		drm_global_item_unref(&adev->mman.mem_global_ref);
		drm_global_item_unref(&adev->mman.bo_global_ref.ref);
		return r;
	}

	adev->mman.mem_global_referenced = true;

	return 0;
}

static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
{
	if (adev->mman.mem_global_referenced) {
		amd_sched_entity_fini(adev->mman.entity.sched,
				      &adev->mman.entity);
		drm_global_item_unref(&adev->mman.bo_global_ref.ref);
		drm_global_item_unref(&adev->mman.mem_global_ref);
		adev->mman.mem_global_referenced = false;
	}
}

static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}

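/*
 * Describe each memory domain to TTM: system RAM, GTT (GPU-mapped system
 * pages), VRAM, and the on-chip GDS/GWS/OA blocks, along with the caching
 * attributes each one supports.
 */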
static int amdgpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				struct ttm_mem_type_manager *man)
{
	struct amdgpu_device *adev;

	adev = amdgpu_get_adev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_TT:
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = adev->mc.gtt_start;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = adev->mc.vram_start;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	case AMDGPU_PL_GDS:
	case AMDGPU_PL_GWS:
	case AMDGPU_PL_OA:
		/* On-chip GDS memory */
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = 0;
		man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_CMA;
		man->available_caching = TTM_PL_FLAG_UNCACHED;
		man->default_caching = TTM_PL_FLAG_UNCACHED;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

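/*
 * Pick where an evicted BO should go: VRAM contents are moved to GTT while
 * the copy engine is up (to the CPU domain otherwise); everything else falls
 * back to the CPU domain.
 */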
static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
			       struct ttm_placement *placement)
{
	struct amdgpu_bo *rbo;
	static struct ttm_place placements = {
		.fpfn = 0,
		.lpfn = 0,
		.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM
	};

	if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) {
		placement->placement = &placements;
		placement->busy_placement = &placements;
		placement->num_placement = 1;
		placement->num_busy_placement = 1;
		return;
	}
	rbo = container_of(bo, struct amdgpu_bo, tbo);
	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		if (!rbo->adev->mman.buffer_funcs_ring->ready)
			amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_CPU);
		else
			amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_GTT);
		break;
	case TTM_PL_TT:
	default:
		amdgpu_ttm_placement_from_domain(rbo, AMDGPU_GEM_DOMAIN_CPU);
	}
	*placement = rbo->placement;
}

static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	struct amdgpu_bo *rbo = container_of(bo, struct amdgpu_bo, tbo);

	if (amdgpu_ttm_tt_get_usermm(bo->ttm))
		return -EPERM;
	return drm_vma_node_verify_access(&rbo->gem_base.vma_node, filp);
}

static void amdgpu_move_null(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	BUG_ON(old_mem->mm_node != NULL);
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
}

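/*
 * Copy a BO with the SDMA engine. Both placements are translated to GPU
 * addresses before amdgpu_copy_buffer() is asked to schedule the blit, and
 * the resulting fence is handed to TTM for the pipelined move.
 */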
static int amdgpu_move_blit(struct ttm_buffer_object *bo,
			    bool evict, bool no_wait_gpu,
			    struct ttm_mem_reg *new_mem,
			    struct ttm_mem_reg *old_mem)
{
	struct amdgpu_device *adev;
	struct amdgpu_ring *ring;
	uint64_t old_start, new_start;
	struct fence *fence;
	int r;

	adev = amdgpu_get_adev(bo->bdev);
	ring = adev->mman.buffer_funcs_ring;
	old_start = old_mem->start << PAGE_SHIFT;
	new_start = new_mem->start << PAGE_SHIFT;

	switch (old_mem->mem_type) {
	case TTM_PL_VRAM:
		old_start += adev->mc.vram_start;
		break;
	case TTM_PL_TT:
		old_start += adev->mc.gtt_start;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
		return -EINVAL;
	}
	switch (new_mem->mem_type) {
	case TTM_PL_VRAM:
		new_start += adev->mc.vram_start;
		break;
	case TTM_PL_TT:
		new_start += adev->mc.gtt_start;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", new_mem->mem_type);
		return -EINVAL;
	}
	if (!ring->ready) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		return -EINVAL;
	}

	BUILD_BUG_ON((PAGE_SIZE % AMDGPU_GPU_PAGE_SIZE) != 0);

	r = amdgpu_copy_buffer(ring, old_start, new_start,
			       new_mem->num_pages * PAGE_SIZE, /* bytes */
			       bo->resv, &fence);
	if (r)
		return r;

	r = ttm_bo_pipeline_move(bo, fence, evict, new_mem);
	fence_put(fence);
	return r;
}

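/*
 * VRAM -> system moves bounce through GTT: allocate a temporary GTT
 * placement, blit into it, then let ttm_bo_move_ttm() unbind to system
 * pages. The inverse path below works the same way in reverse.
 */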
static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible,
				bool no_wait_gpu,
				struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_place placements;
	struct ttm_placement placement;
	int r;

	adev = amdgpu_get_adev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = 0;
	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
			     interruptible, no_wait_gpu);
	if (unlikely(r))
		return r;

	r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
	if (unlikely(r))
		goto out_cleanup;

	r = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (unlikely(r))
		goto out_cleanup;

	r = amdgpu_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem);
	if (unlikely(r))
		goto out_cleanup;

	r = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}

static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible,
				bool no_wait_gpu,
				struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_placement placement;
	struct ttm_place placements;
	int r;

	adev = amdgpu_get_adev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements.fpfn = 0;
	placements.lpfn = 0;
	placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
			     interruptible, no_wait_gpu);
	if (unlikely(r))
		return r;

	r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
	if (unlikely(r))
		goto out_cleanup;

	r = amdgpu_move_blit(bo, true, no_wait_gpu, new_mem, old_mem);
	if (unlikely(r))
		goto out_cleanup;

out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}

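/*
 * Top-level move callback: handle the trivial cases (NULL ttm, pure
 * bind/unbind between system and GTT) inline, blit with the copy engine
 * when possible, and fall back to ttm_bo_move_memcpy() when the engine is
 * unavailable.
 */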
static int amdgpu_bo_move(struct ttm_buffer_object *bo,
			  bool evict, bool interruptible,
			  bool no_wait_gpu,
			  struct ttm_mem_reg *new_mem)
{
	struct amdgpu_device *adev;
	struct amdgpu_bo *abo;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int r;

	/* Can't move a pinned BO */
	abo = container_of(bo, struct amdgpu_bo, tbo);
	if (WARN_ON_ONCE(abo->pin_count > 0))
		return -EINVAL;

	adev = amdgpu_get_adev(bo->bdev);

	/* remember the eviction */
	if (evict)
		atomic64_inc(&adev->num_evictions);

	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		amdgpu_move_null(bo, new_mem);
		return 0;
	}
	if ((old_mem->mem_type == TTM_PL_TT &&
	     new_mem->mem_type == TTM_PL_SYSTEM) ||
	    (old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_TT)) {
		/* bind is enough */
		amdgpu_move_null(bo, new_mem);
		return 0;
	}
	if (adev->mman.buffer_funcs == NULL ||
	    adev->mman.buffer_funcs_ring == NULL ||
	    !adev->mman.buffer_funcs_ring->ready) {
		/* use memcpy */
		goto memcpy;
	}

	if (old_mem->mem_type == TTM_PL_VRAM &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		r = amdgpu_move_vram_ram(bo, evict, interruptible,
					 no_wait_gpu, new_mem);
	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
		   new_mem->mem_type == TTM_PL_VRAM) {
		r = amdgpu_move_ram_vram(bo, evict, interruptible,
					 no_wait_gpu, new_mem);
	} else {
		r = amdgpu_move_blit(bo, evict, no_wait_gpu, new_mem, old_mem);
	}

	if (r) {
memcpy:
		r = ttm_bo_move_memcpy(bo, evict, interruptible,
				       no_wait_gpu, new_mem);
		if (r)
			return r;
	}

	/* update statistics */
	atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved);
	return 0;
}

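/*
 * Fill in the bus placement for CPU access. Only VRAM that lies inside the
 * visible aperture can be mapped; GTT and system memory need no bus address.
 */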
static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct amdgpu_device *adev = amdgpu_get_adev(bdev);

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_TT:
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		/* check if it's visible */
		if ((mem->bus.offset + mem->bus.size) > adev->mc.visible_vram_size)
			return -EINVAL;
		mem->bus.base = adev->mc.aper_base;
		mem->bus.is_iomem = true;
#ifdef __alpha__
		/*
		 * Alpha: use bus.addr to hold the ioremap() return,
		 * so we can modify bus.base below.
		 */
		if (mem->placement & TTM_PL_FLAG_WC)
			mem->bus.addr =
				ioremap_wc(mem->bus.base + mem->bus.offset,
					   mem->bus.size);
		else
			mem->bus.addr =
				ioremap_nocache(mem->bus.base + mem->bus.offset,
						mem->bus.size);

		/*
		 * Alpha: Use just the bus offset plus
		 * the hose/domain memory base for bus.base.
		 * It then can be used to build PTEs for VRAM
		 * access, as done in ttm_bo_vm_fault().
		 */
		mem->bus.base = (mem->bus.base & 0x0ffffffffUL) +
			adev->ddev->hose->dense_mem_base;
#endif
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void amdgpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

/*
 * TTM backend functions.
 */
struct amdgpu_ttm_gup_task_list {
	struct list_head	list;
	struct task_struct	*task;
};

struct amdgpu_ttm_tt {
	struct ttm_dma_tt	ttm;
	struct amdgpu_device	*adev;
	u64			offset;
	uint64_t		userptr;
	struct mm_struct	*usermm;
	uint32_t		userflags;
	spinlock_t		guptasklock;
	struct list_head	guptasks;
	atomic_t		mmu_invalidations;
};

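/*
 * Pin the pages backing a userptr BO. Tasks currently inside
 * get_user_pages() are tracked on gtt->guptasks so the MMU notifier can
 * tell an invalidation triggered by this task from an external one.
 */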
int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	unsigned pinned = 0;
	int r;

	if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
		/*
		 * check that we only use anonymous memory
		 * to prevent problems with writeback
		 */
		unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
		struct vm_area_struct *vma;

		vma = find_vma(gtt->usermm, gtt->userptr);
		if (!vma || vma->vm_file || vma->vm_end < end)
			return -EPERM;
	}

	do {
		unsigned num_pages = ttm->num_pages - pinned;
		uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
		struct page **p = pages + pinned;
		struct amdgpu_ttm_gup_task_list guptask;

		guptask.task = current;
		spin_lock(&gtt->guptasklock);
		list_add(&guptask.list, &gtt->guptasks);
		spin_unlock(&gtt->guptasklock);

		r = get_user_pages(userptr, num_pages, write, 0, p, NULL);

		spin_lock(&gtt->guptasklock);
		list_del(&guptask.list);
		spin_unlock(&gtt->guptasklock);

		if (r < 0)
			goto release_pages;

		pinned += r;

	} while (pinned < ttm->num_pages);

	return 0;

release_pages:
	release_pages(pages, pinned, 0);
	return r;
}

/* prepare the sg table with the user pages */
static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_get_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned nents;
	int r;

	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
				      ttm->num_pages << PAGE_SHIFT,
				      GFP_KERNEL);
	if (r)
		goto release_sg;

	r = -ENOMEM;
	nents = dma_map_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
	if (nents != ttm->sg->nents)
		goto release_sg;

	drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
					 gtt->ttm.dma_address, ttm->num_pages);

	return 0;

release_sg:
	kfree(ttm->sg);
	return r;
}

static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev = amdgpu_get_adev(ttm->bdev);
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	struct sg_page_iter sg_iter;

	int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

	/* double check that we don't free the table twice */
	if (!ttm->sg->sgl)
		return;

	/* free the sg table and pages again */
	dma_unmap_sg(adev->dev, ttm->sg->sgl, ttm->sg->nents, direction);

	for_each_sg_page(ttm->sg->sgl, &sg_iter, ttm->sg->nents, 0) {
		struct page *page = sg_page_iter_page(&sg_iter);

		if (!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY))
			set_page_dirty(page);

		mark_page_accessed(page);
		put_page(page);
	}

	sg_free_table(ttm->sg);
}

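/*
 * Bind the ttm pages into the GART at the offset picked by TTM. GDS, GWS
 * and OA are not backed by pages, so binding them is refused.
 */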
static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
				   struct ttm_mem_reg *bo_mem)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	uint32_t flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
	int r;

	if (gtt->userptr) {
		r = amdgpu_ttm_tt_pin_userptr(ttm);
		if (r) {
			DRM_ERROR("failed to pin userptr\n");
			return r;
		}
	}
	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
	if (!ttm->num_pages) {
		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
		     ttm->num_pages, bo_mem, ttm);
	}

	if (bo_mem->mem_type == AMDGPU_PL_GDS ||
	    bo_mem->mem_type == AMDGPU_PL_GWS ||
	    bo_mem->mem_type == AMDGPU_PL_OA)
		return -EINVAL;

	r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages,
			     ttm->pages, gtt->ttm.dma_address, flags);

	if (r) {
		DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
			  ttm->num_pages, (unsigned)gtt->offset);
		return r;
	}
	return 0;
}

static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
	if (gtt->adev->gart.ready)
		amdgpu_gart_unbind(gtt->adev, gtt->offset, ttm->num_pages);

	if (gtt->userptr)
		amdgpu_ttm_tt_unpin_userptr(ttm);

	return 0;
}

static void amdgpu_ttm_backend_destroy(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	ttm_dma_tt_fini(&gtt->ttm);
	kfree(gtt);
}

static struct ttm_backend_func amdgpu_backend_func = {
	.bind = &amdgpu_ttm_backend_bind,
	.unbind = &amdgpu_ttm_backend_unbind,
	.destroy = &amdgpu_ttm_backend_destroy,
};

static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev,
					   unsigned long size, uint32_t page_flags,
					   struct page *dummy_read_page)
{
	struct amdgpu_device *adev;
	struct amdgpu_ttm_tt *gtt;

	adev = amdgpu_get_adev(bdev);

	gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL);
	if (gtt == NULL)
		return NULL;

	gtt->ttm.ttm.func = &amdgpu_backend_func;
	gtt->adev = adev;
	if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
		kfree(gtt);
		return NULL;
	}
	return &gtt->ttm.ttm;
}

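/*
 * Allocate and DMA-map the backing pages. Userptr ttms just get an empty
 * sg_table here (the pages are pinned at bind time), imported dma-buf ttms
 * reuse the exporter's sg_table, and ordinary ttms go through the
 * swiotlb-aware DMA pool or the page pool plus pci_map_page().
 */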
static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev;
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned i;
	int r;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (ttm->state != tt_unpopulated)
		return 0;

	if (gtt && gtt->userptr) {
		ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
		if (!ttm->sg)
			return -ENOMEM;

		ttm->page_flags |= TTM_PAGE_FLAG_SG;
		ttm->state = tt_unbound;
		return 0;
	}

	if (slave && ttm->sg) {
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 gtt->ttm.dma_address, ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}

	adev = amdgpu_get_adev(ttm->bdev);

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl())
		return ttm_dma_populate(&gtt->ttm, adev->dev);
#endif

	r = ttm_pool_populate(ttm);
	if (r)
		return r;

	for (i = 0; i < ttm->num_pages; i++) {
		gtt->ttm.dma_address[i] = pci_map_page(adev->pdev, ttm->pages[i],
						       0, PAGE_SIZE,
						       PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(adev->pdev, gtt->ttm.dma_address[i])) {
			while (i--) {
				pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i],
					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
				gtt->ttm.dma_address[i] = 0;
			}
			ttm_pool_unpopulate(ttm);
			return -EFAULT;
		}
	}
	return 0;
}

static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct amdgpu_device *adev;
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	unsigned i;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (gtt && gtt->userptr) {
		kfree(ttm->sg);
		ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
		return;
	}

	if (slave)
		return;

	adev = amdgpu_get_adev(ttm->bdev);

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate(&gtt->ttm, adev->dev);
		return;
	}
#endif

	for (i = 0; i < ttm->num_pages; i++) {
		if (gtt->ttm.dma_address[i]) {
			pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		}
	}

	ttm_pool_unpopulate(ttm);
}

int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
			      uint32_t flags)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL)
		return -EINVAL;

	gtt->userptr = addr;
	gtt->usermm = current->mm;
	gtt->userflags = flags;
	spin_lock_init(&gtt->guptasklock);
	INIT_LIST_HEAD(&gtt->guptasks);
	atomic_set(&gtt->mmu_invalidations, 0);

	return 0;
}

struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL)
		return NULL;

	return gtt->usermm;
}

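/*
 * Check whether an MMU-notifier range overlaps a userptr BO. Invalidations
 * raised by a task that is itself inside get_user_pages() are ignored to
 * avoid false positives while pinning.
 */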
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
				  unsigned long end)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	struct amdgpu_ttm_gup_task_list *entry;
	unsigned long size;

	if (gtt == NULL || !gtt->userptr)
		return false;

	size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
	if (gtt->userptr > end || gtt->userptr + size <= start)
		return false;

	spin_lock(&gtt->guptasklock);
	list_for_each_entry(entry, &gtt->guptasks, list) {
		if (entry->task == current) {
			spin_unlock(&gtt->guptasklock);
			return false;
		}
	}
	spin_unlock(&gtt->guptasklock);

	atomic_inc(&gtt->mmu_invalidations);

	return true;
}

bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
				       int *last_invalidated)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;
	int prev_invalidated = *last_invalidated;

	*last_invalidated = atomic_read(&gtt->mmu_invalidations);
	return prev_invalidated != *last_invalidated;
}

bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL)
		return false;

	return !!(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
}

uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
				 struct ttm_mem_reg *mem)
{
	uint32_t flags = 0;

	if (mem && mem->mem_type != TTM_PL_SYSTEM)
		flags |= AMDGPU_PTE_VALID;

	if (mem && mem->mem_type == TTM_PL_TT) {
		flags |= AMDGPU_PTE_SYSTEM;

		if (ttm->caching_state == tt_cached)
			flags |= AMDGPU_PTE_SNOOPED;
	}

	if (adev->asic_type >= CHIP_TONGA)
		flags |= AMDGPU_PTE_EXECUTABLE;

	flags |= AMDGPU_PTE_READABLE;

	if (!amdgpu_ttm_tt_is_readonly(ttm))
		flags |= AMDGPU_PTE_WRITEABLE;

	return flags;
}

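/*
 * Per-size LRU bookkeeping: BOs are grouped into AMDGPU_TTM_LRU_SIZE
 * buckets by log2 of their page count, and each bucket remembers its tail
 * so same-sized BOs stay clustered on the global TTM LRU lists.
 */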
static void amdgpu_ttm_lru_removal(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_get_adev(tbo->bdev);
	unsigned i, j;

	for (i = 0; i < AMDGPU_TTM_LRU_SIZE; ++i) {
		struct amdgpu_mman_lru *lru = &adev->mman.log2_size[i];

		for (j = 0; j < TTM_NUM_MEM_TYPES; ++j)
			if (&tbo->lru == lru->lru[j])
				lru->lru[j] = tbo->lru.prev;

		if (&tbo->swap == lru->swap_lru)
			lru->swap_lru = tbo->swap.prev;
	}
}

static struct amdgpu_mman_lru *amdgpu_ttm_lru(struct ttm_buffer_object *tbo)
{
	struct amdgpu_device *adev = amdgpu_get_adev(tbo->bdev);
	unsigned log2_size = min(ilog2(tbo->num_pages),
				 AMDGPU_TTM_LRU_SIZE - 1);

	return &adev->mman.log2_size[log2_size];
}

static struct list_head *amdgpu_ttm_lru_tail(struct ttm_buffer_object *tbo)
{
	struct amdgpu_mman_lru *lru = amdgpu_ttm_lru(tbo);
	struct list_head *res = lru->lru[tbo->mem.mem_type];

	lru->lru[tbo->mem.mem_type] = &tbo->lru;

	return res;
}

static struct list_head *amdgpu_ttm_swap_lru_tail(struct ttm_buffer_object *tbo)
{
	struct amdgpu_mman_lru *lru = amdgpu_ttm_lru(tbo);
	struct list_head *res = lru->swap_lru;

	lru->swap_lru = &tbo->swap;

	return res;
}

static struct ttm_bo_driver amdgpu_bo_driver = {
	.ttm_tt_create = &amdgpu_ttm_tt_create,
	.ttm_tt_populate = &amdgpu_ttm_tt_populate,
	.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
	.invalidate_caches = &amdgpu_invalidate_caches,
	.init_mem_type = &amdgpu_init_mem_type,
	.evict_flags = &amdgpu_evict_flags,
	.move = &amdgpu_bo_move,
	.verify_access = &amdgpu_verify_access,
	.move_notify = &amdgpu_bo_move_notify,
	.fault_reserve_notify = &amdgpu_bo_fault_reserve_notify,
	.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
	.io_mem_free = &amdgpu_ttm_io_mem_free,
	.lru_removal = &amdgpu_ttm_lru_removal,
	.lru_tail = &amdgpu_ttm_lru_tail,
	.swap_lru_tail = &amdgpu_ttm_swap_lru_tail,
};

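/*
 * Bring up TTM for this device: the BO device itself, the VRAM/GTT heaps
 * sized from the memory controller, the stolen VGA buffer, and the
 * GDS/GWS/OA heaps, plus the debugfs entries.
 */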
int amdgpu_ttm_init(struct amdgpu_device *adev)
{
	unsigned i, j;
	int r;

	r = amdgpu_ttm_global_init(adev);
	if (r)
		return r;

	/* No other user of the address space, so set the offset to 0 */
	r = ttm_bo_device_init(&adev->mman.bdev,
			       adev->mman.bo_global_ref.ref.object,
			       &amdgpu_bo_driver,
			       adev->ddev->anon_inode->i_mapping,
			       DRM_FILE_PAGE_OFFSET,
			       adev->need_dma32);
	if (r) {
		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
		return r;
	}

	for (i = 0; i < AMDGPU_TTM_LRU_SIZE; ++i) {
		struct amdgpu_mman_lru *lru = &adev->mman.log2_size[i];

		for (j = 0; j < TTM_NUM_MEM_TYPES; ++j)
			lru->lru[j] = &adev->mman.bdev.man[j].lru;
		lru->swap_lru = &adev->mman.bdev.glob->swap_lru;
	}

	adev->mman.initialized = true;
	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
			   adev->mc.real_vram_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing VRAM heap.\n");
		return r;
	}
	/* Change the size here instead of the init above so only lpfn is affected */
	amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);

	r = amdgpu_bo_create(adev, 256 * 1024, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED,
			     NULL, NULL, &adev->stollen_vga_memory);
	if (r)
		return r;

	r = amdgpu_bo_reserve(adev->stollen_vga_memory, false);
	if (r)
		return r;
	r = amdgpu_bo_pin(adev->stollen_vga_memory, AMDGPU_GEM_DOMAIN_VRAM, NULL);
	amdgpu_bo_unreserve(adev->stollen_vga_memory);
	if (r) {
		amdgpu_bo_unref(&adev->stollen_vga_memory);
		return r;
	}
	DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
		 (unsigned)(adev->mc.real_vram_size / (1024 * 1024)));
	r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT,
			   adev->mc.gtt_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing GTT heap.\n");
		return r;
	}
	DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
		 (unsigned)(adev->mc.gtt_size / (1024 * 1024)));

	adev->gds.mem.total_size = adev->gds.mem.total_size << AMDGPU_GDS_SHIFT;
	adev->gds.mem.gfx_partition_size = adev->gds.mem.gfx_partition_size << AMDGPU_GDS_SHIFT;
	adev->gds.mem.cs_partition_size = adev->gds.mem.cs_partition_size << AMDGPU_GDS_SHIFT;
	adev->gds.gws.total_size = adev->gds.gws.total_size << AMDGPU_GWS_SHIFT;
	adev->gds.gws.gfx_partition_size = adev->gds.gws.gfx_partition_size << AMDGPU_GWS_SHIFT;
	adev->gds.gws.cs_partition_size = adev->gds.gws.cs_partition_size << AMDGPU_GWS_SHIFT;
	adev->gds.oa.total_size = adev->gds.oa.total_size << AMDGPU_OA_SHIFT;
	adev->gds.oa.gfx_partition_size = adev->gds.oa.gfx_partition_size << AMDGPU_OA_SHIFT;
	adev->gds.oa.cs_partition_size = adev->gds.oa.cs_partition_size << AMDGPU_OA_SHIFT;
	/* GDS Memory */
	r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GDS,
			   adev->gds.mem.total_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing GDS heap.\n");
		return r;
	}

	/* GWS */
	r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_GWS,
			   adev->gds.gws.total_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing gws heap.\n");
		return r;
	}

	/* OA */
	r = ttm_bo_init_mm(&adev->mman.bdev, AMDGPU_PL_OA,
			   adev->gds.oa.total_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing oa heap.\n");
		return r;
	}

	r = amdgpu_ttm_debugfs_init(adev);
	if (r) {
		DRM_ERROR("Failed to init debugfs\n");
		return r;
	}
	return 0;
}

void amdgpu_ttm_fini(struct amdgpu_device *adev)
{
	int r;

	if (!adev->mman.initialized)
		return;
	amdgpu_ttm_debugfs_fini(adev);
	if (adev->stollen_vga_memory) {
		r = amdgpu_bo_reserve(adev->stollen_vga_memory, false);
		if (r == 0) {
			amdgpu_bo_unpin(adev->stollen_vga_memory);
			amdgpu_bo_unreserve(adev->stollen_vga_memory);
		}
		amdgpu_bo_unref(&adev->stollen_vga_memory);
	}
	ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT);
	ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GDS);
	ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_GWS);
	ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
	ttm_bo_device_release(&adev->mman.bdev);
	amdgpu_gart_fini(adev);
	amdgpu_ttm_global_fini(adev);
	adev->mman.initialized = false;
	DRM_INFO("amdgpu: ttm finalized\n");
}

/*
 * This should only be called at bootup or when userspace isn't running.
 */
void amdgpu_ttm_set_active_vram_size(struct amdgpu_device *adev, u64 size)
{
	struct ttm_mem_type_manager *man;

	if (!adev->mman.initialized)
		return;

	man = &adev->mman.bdev.man[TTM_PL_VRAM];
	/* this just adjusts TTM's idea of the size, which sets lpfn to the correct value */
	man->size = size >> PAGE_SHIFT;
}

int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct amdgpu_device *adev;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
		return -EINVAL;

	file_priv = filp->private_data;
	adev = file_priv->minor->dev->dev_private;
	if (adev == NULL)
		return -EINVAL;

	return ttm_bo_mmap(filp, vma, &adev->mman.bdev);
}

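/*
 * Schedule a GPU copy of byte_count bytes between two GPU addresses on the
 * SDMA ring, optionally syncing to a reservation object first. On success
 * *fence signals completion of the copy.
 */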
int amdgpu_copy_buffer(struct amdgpu_ring *ring,
		       uint64_t src_offset,
		       uint64_t dst_offset,
		       uint32_t byte_count,
		       struct reservation_object *resv,
		       struct fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_job *job;

	uint32_t max_bytes;
	unsigned num_loops, num_dw;
	unsigned i;
	int r;

	max_bytes = adev->mman.buffer_funcs->copy_max_bytes;
	num_loops = DIV_ROUND_UP(byte_count, max_bytes);
	num_dw = num_loops * adev->mman.buffer_funcs->copy_num_dw;

	/* for IB padding */
	while (num_dw & 0x7)
		num_dw++;

	r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
	if (r)
		return r;

	if (resv) {
		r = amdgpu_sync_resv(adev, &job->sync, resv,
				     AMDGPU_FENCE_OWNER_UNDEFINED);
		if (r) {
			DRM_ERROR("sync failed (%d).\n", r);
			goto error_free;
		}
	}

	for (i = 0; i < num_loops; i++) {
		uint32_t cur_size_in_bytes = min(byte_count, max_bytes);

		amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
					dst_offset, cur_size_in_bytes);

		src_offset += cur_size_in_bytes;
		dst_offset += cur_size_in_bytes;
		byte_count -= cur_size_in_bytes;
	}

	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
	WARN_ON(job->ibs[0].length_dw > num_dw);
	r = amdgpu_job_submit(job, ring, &adev->mman.entity,
			      AMDGPU_FENCE_OWNER_UNDEFINED, fence);
	if (r)
		goto error_free;

	return 0;

error_free:
	amdgpu_job_free(job);
	return r;
}

#if defined(CONFIG_DEBUG_FS)

static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	unsigned ttm_pl = *(int *)node->info_ent->data;
	struct drm_device *dev = node->minor->dev;
	struct amdgpu_device *adev = dev->dev_private;
	struct drm_mm *mm = (struct drm_mm *)adev->mman.bdev.man[ttm_pl].priv;
	int ret;
	struct ttm_bo_global *glob = adev->mman.bdev.glob;

	spin_lock(&glob->lru_lock);
	ret = drm_mm_dump_table(m, mm);
	spin_unlock(&glob->lru_lock);
	if (ttm_pl == TTM_PL_VRAM)
		seq_printf(m, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n",
			   adev->mman.bdev.man[ttm_pl].size,
			   (u64)atomic64_read(&adev->vram_usage) >> 20,
			   (u64)atomic64_read(&adev->vram_vis_usage) >> 20);
	return ret;
}

static int ttm_pl_vram = TTM_PL_VRAM;
static int ttm_pl_tt = TTM_PL_TT;

static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
	{"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, &ttm_pl_vram},
	{"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, &ttm_pl_tt},
	{"ttm_page_pool", ttm_page_alloc_debugfs, 0, NULL},
#ifdef CONFIG_SWIOTLB
	{"ttm_dma_page_pool", ttm_dma_page_alloc_debugfs, 0, NULL}
#endif
};

static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
				    size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = f->f_inode->i_private;
	ssize_t result = 0;
	int r;

	if (size & 0x3 || *pos & 0x3)
		return -EINVAL;

	while (size) {
		unsigned long flags;
		uint32_t value;

		if (*pos >= adev->mc.mc_vram_size)
			return result;

		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
		WREG32(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
		WREG32(mmMM_INDEX_HI, *pos >> 31);
		value = RREG32(mmMM_DATA);
		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);

		r = put_user(value, (uint32_t *)buf);
		if (r)
			return r;

		result += 4;
		buf += 4;
		*pos += 4;
		size -= 4;
	}

	return result;
}

static const struct file_operations amdgpu_ttm_vram_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ttm_vram_read,
	.llseek = default_llseek
};

#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS

static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
				   size_t size, loff_t *pos)
{
	struct amdgpu_device *adev = f->f_inode->i_private;
	ssize_t result = 0;
	int r;

	while (size) {
		loff_t p = *pos / PAGE_SIZE;
		unsigned off = *pos & ~PAGE_MASK;
		size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
		struct page *page;
		void *ptr;

		if (p >= adev->gart.num_cpu_pages)
			return result;

		page = adev->gart.pages[p];
		if (page) {
			ptr = kmap(page);
			ptr += off;

			r = copy_to_user(buf, ptr, cur_size);
			kunmap(adev->gart.pages[p]);
		} else
			r = clear_user(buf, cur_size);

		if (r)
			return -EFAULT;

		result += cur_size;
		buf += cur_size;
		*pos += cur_size;
		size -= cur_size;
	}

	return result;
}

static const struct file_operations amdgpu_ttm_gtt_fops = {
	.owner = THIS_MODULE,
	.read = amdgpu_ttm_gtt_read,
	.llseek = default_llseek
};

#endif

#endif

1338 | static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev) |
1339 | { | |
1340 | #if defined(CONFIG_DEBUG_FS) | |
1341 | unsigned count; | |
1342 | ||
1343 | struct drm_minor *minor = adev->ddev->primary; | |
1344 | struct dentry *ent, *root = minor->debugfs_root; | |
1345 | ||
1346 | ent = debugfs_create_file("amdgpu_vram", S_IFREG | S_IRUGO, root, | |
1347 | adev, &amdgpu_ttm_vram_fops); | |
1348 | if (IS_ERR(ent)) | |
1349 | return PTR_ERR(ent); | |
1350 | i_size_write(ent->d_inode, adev->mc.mc_vram_size); | |
1351 | adev->mman.vram = ent; | |
1352 | ||
a1d29476 | 1353 | #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS |
d38ceaf9 AD |
1354 | ent = debugfs_create_file("amdgpu_gtt", S_IFREG | S_IRUGO, root, |
1355 | adev, &amdgpu_ttm_gtt_fops); | |
1356 | if (IS_ERR(ent)) | |
1357 | return PTR_ERR(ent); | |
1358 | i_size_write(ent->d_inode, adev->mc.gtt_size); | |
1359 | adev->mman.gtt = ent; | |
1360 | ||
a1d29476 | 1361 | #endif |
d38ceaf9 AD |
1362 | count = ARRAY_SIZE(amdgpu_ttm_debugfs_list); |
1363 | ||
1364 | #ifdef CONFIG_SWIOTLB | |
1365 | if (!swiotlb_nr_tbl()) | |
1366 | --count; | |
1367 | #endif | |
1368 | ||
1369 | return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count); | |
1370 | #else | |
1371 | ||
1372 | return 0; | |
1373 | #endif | |
1374 | } | |
1375 | ||
1376 | static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev) | |
1377 | { | |
1378 | #if defined(CONFIG_DEBUG_FS) | |
1379 | ||
1380 | debugfs_remove(adev->mman.vram); | |
1381 | adev->mman.vram = NULL; | |
1382 | ||
a1d29476 | 1383 | #ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS |
d38ceaf9 AD |
1384 | debugfs_remove(adev->mman.gtt); |
1385 | adev->mman.gtt = NULL; | |
1386 | #endif | |
a1d29476 CK |
1387 | |
1388 | #endif | |
d38ceaf9 | 1389 | } |