drm/radeon: use an interval tree to manage the VMA v2
drivers/gpu/drm/radeon/radeon_vm.c
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_trace.h"

/*
 * GPUVM
 * GPUVM is similar to the legacy gart on older asics, however
 * rather than there being a single global gart table
 * for the entire GPU, there are multiple VM page tables active
 * at any given time.  The VM page tables can contain a mix of
 * vram pages and system memory pages, and system memory pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID.  When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer.  VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time;
 * SI supports 16.
 */

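/*
 * Worked example (assuming a VM block size of 9, a typical value for
 * radeon_vm_block_size): each page table then covers 1 << 9 = 512 GPU pages,
 * i.e. 2 MiB of address space with 4 KiB pages, and the page directory needs
 * max_pfn >> 9 entries of 8 bytes each.
 */
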
/**
 * radeon_vm_num_pdes - return the number of page directory entries
 *
 * @rdev: radeon_device pointer
 *
 * Calculate the number of page directory entries (cayman+).
 */
static unsigned radeon_vm_num_pdes(struct radeon_device *rdev)
{
	return rdev->vm_manager.max_pfn >> radeon_vm_block_size;
}

/**
 * radeon_vm_directory_size - returns the size of the page directory in bytes
 *
 * @rdev: radeon_device pointer
 *
 * Calculate the size of the page directory in bytes (cayman+).
 */
static unsigned radeon_vm_directory_size(struct radeon_device *rdev)
{
	return RADEON_GPU_PAGE_ALIGN(radeon_vm_num_pdes(rdev) * 8);
}

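/*
 * For instance (illustrative numbers): a 4 GiB address space is
 * max_pfn = 1 << 20 GPU pages; with a block size of 9 that yields 2048 PDEs,
 * so radeon_vm_directory_size() returns a 16 KiB, page-aligned directory.
 */
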
/**
 * radeon_vm_manager_init - init the vm manager
 *
 * @rdev: radeon_device pointer
 *
 * Init the vm manager (cayman+).
 * Returns 0 for success, error for failure.
 */
int radeon_vm_manager_init(struct radeon_device *rdev)
{
	int r;

	if (!rdev->vm_manager.enabled) {
		r = radeon_asic_vm_init(rdev);
		if (r)
			return r;

		rdev->vm_manager.enabled = true;
	}
	return 0;
}

/**
 * radeon_vm_manager_fini - tear down the vm manager
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the VM manager (cayman+).
 */
void radeon_vm_manager_fini(struct radeon_device *rdev)
{
	int i;

	if (!rdev->vm_manager.enabled)
		return;

	for (i = 0; i < RADEON_NUM_VM; ++i)
		radeon_fence_unref(&rdev->vm_manager.active[i]);
	radeon_asic_vm_fini(rdev);
	rdev->vm_manager.enabled = false;
}

/**
 * radeon_vm_get_bos - add the vm BOs to a validation list
 *
 * @rdev: radeon_device pointer
 * @vm: vm providing the BOs
 * @head: head of validation list
 *
 * Add the page directory and page tables to the list of BOs to
 * validate for command submission (cayman+).
 */
struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
					  struct radeon_vm *vm,
					  struct list_head *head)
{
	struct radeon_cs_reloc *list;
	unsigned i, idx;

	list = kmalloc_array(vm->max_pde_used + 2,
			     sizeof(struct radeon_cs_reloc), GFP_KERNEL);
	if (!list)
		return NULL;

	/* add the vm page table to the list */
	list[0].gobj = NULL;
	list[0].robj = vm->page_directory;
	list[0].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
	list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
	list[0].tv.bo = &vm->page_directory->tbo;
	list[0].tiling_flags = 0;
	list[0].handle = 0;
	list_add(&list[0].tv.head, head);

	for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {
		if (!vm->page_tables[i].bo)
			continue;

		list[idx].gobj = NULL;
		list[idx].robj = vm->page_tables[i].bo;
		list[idx].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
		list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
		list[idx].tv.bo = &list[idx].robj->tbo;
		list[idx].tiling_flags = 0;
		list[idx].handle = 0;
		list_add(&list[idx++].tv.head, head);
	}

	return list;
}

/**
 * radeon_vm_grab_id - allocate the next free VMID
 *
 * @rdev: radeon_device pointer
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 *
 * Allocate an id for the vm (cayman+).
 * Returns the fence we need to sync to (if any).
 *
 * Global and local mutex must be locked!
 */
struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
				       struct radeon_vm *vm, int ring)
{
	struct radeon_fence *best[RADEON_NUM_RINGS] = {};
	unsigned choices[2] = {};
	unsigned i;

	/* check if the id is still valid */
	if (vm->last_id_use && vm->last_id_use == rdev->vm_manager.active[vm->id])
		return NULL;

	/* we definitely need to flush */
	radeon_fence_unref(&vm->last_flush);

	/* skip over VMID 0, since it is the system VM */
	for (i = 1; i < rdev->vm_manager.nvm; ++i) {
		struct radeon_fence *fence = rdev->vm_manager.active[i];

		if (fence == NULL) {
			/* found a free one */
			vm->id = i;
			trace_radeon_vm_grab_id(vm->id, ring);
			return NULL;
		}

		if (radeon_fence_is_earlier(fence, best[fence->ring])) {
			best[fence->ring] = fence;
			choices[fence->ring == ring ? 0 : 1] = i;
		}
	}

	for (i = 0; i < 2; ++i) {
		if (choices[i]) {
			vm->id = choices[i];
			trace_radeon_vm_grab_id(vm->id, ring);
			return rdev->vm_manager.active[choices[i]];
		}
	}

	/* should never happen */
	BUG();
	return NULL;
}

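/*
 * Note on the selection above: choices[0] holds a VMID whose last use was on
 * the ring we are submitting to, choices[1] one last used on another ring.
 * Same-ring reuse is tried first since waiting for that fence needs no
 * cross-ring synchronization.
 */
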
/**
 * radeon_vm_flush - hardware flush the vm
 *
 * @rdev: radeon_device pointer
 * @vm: vm we want to flush
 * @ring: ring to use for flush
 *
 * Flush the vm (cayman+).
 *
 * Global and local mutex must be locked!
 */
void radeon_vm_flush(struct radeon_device *rdev,
		     struct radeon_vm *vm,
		     int ring)
{
	uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory);

	/* if we can't remember our last VM flush then flush now! */
	/* XXX figure out why we have to flush all the time */
	if (!vm->last_flush || true || pd_addr != vm->pd_gpu_addr) {
		trace_radeon_vm_flush(pd_addr, ring, vm->id);
		vm->pd_gpu_addr = pd_addr;
		radeon_ring_vm_flush(rdev, ring, vm);
	}
}

/**
 * radeon_vm_fence - remember fence for vm
 *
 * @rdev: radeon_device pointer
 * @vm: vm we want to fence
 * @fence: fence to remember
 *
 * Fence the vm (cayman+).
 * Set the fence used to protect page table and id.
 *
 * Global and local mutex must be locked!
 */
void radeon_vm_fence(struct radeon_device *rdev,
		     struct radeon_vm *vm,
		     struct radeon_fence *fence)
{
	radeon_fence_unref(&vm->fence);
	vm->fence = radeon_fence_ref(fence);

	radeon_fence_unref(&rdev->vm_manager.active[vm->id]);
	rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence);

	radeon_fence_unref(&vm->last_id_use);
	vm->last_id_use = radeon_fence_ref(fence);

	/* we just flushed the VM, remember that */
	if (!vm->last_flush)
		vm->last_flush = radeon_fence_ref(fence);
}

/**
 * radeon_vm_bo_find - find the bo_va for a specific vm & bo
 *
 * @vm: requested vm
 * @bo: requested buffer object
 *
 * Find @bo inside the requested vm (cayman+).
 * Search inside the @bo's vm list for the requested vm.
 * Returns the found bo_va or NULL if none is found.
 *
 * Object has to be reserved!
 */
struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
				       struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		if (bo_va->vm == vm) {
			return bo_va;
		}
	}
	return NULL;
}

/**
 * radeon_vm_bo_add - add a bo to a specific vm
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 * @bo: radeon buffer object
 *
 * Add @bo into the requested vm (cayman+).
 * Add @bo to the list of bos associated with the vm.
 * Returns newly added bo_va or NULL for failure
 *
 * Object has to be reserved!
 */
struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
				      struct radeon_vm *vm,
				      struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va;

	bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
	if (bo_va == NULL) {
		return NULL;
	}
	bo_va->vm = vm;
	bo_va->bo = bo;
	bo_va->it.start = 0;
	bo_va->it.last = 0;
	bo_va->flags = 0;
	bo_va->addr = 0;
	bo_va->ref_count = 1;
	INIT_LIST_HEAD(&bo_va->bo_list);
	INIT_LIST_HEAD(&bo_va->vm_status);

	mutex_lock(&vm->mutex);
	list_add_tail(&bo_va->bo_list, &bo->va);
	mutex_unlock(&vm->mutex);

	return bo_va;
}

/**
 * radeon_vm_clear_bo - initially clear the page dir/table
 *
 * @rdev: radeon_device pointer
 * @bo: bo to clear
 */
static int radeon_vm_clear_bo(struct radeon_device *rdev,
			      struct radeon_bo *bo)
{
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head head;
	struct radeon_ib ib;
	unsigned entries;
	uint64_t addr;
	int r;

	memset(&tv, 0, sizeof(tv));
	tv.bo = &bo->tbo;

	INIT_LIST_HEAD(&head);
	list_add(&tv.head, &head);

	r = ttm_eu_reserve_buffers(&ticket, &head);
	if (r)
		return r;

	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
	if (r)
		goto error;

	addr = radeon_bo_gpu_offset(bo);
	entries = radeon_bo_size(bo) / 8;

	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib,
			  NULL, entries * 2 + 64);
	if (r)
		goto error;

	ib.length_dw = 0;

	radeon_asic_vm_set_page(rdev, &ib, addr, 0, entries, 0, 0);

	r = radeon_ib_schedule(rdev, &ib, NULL);
	if (r)
		goto error;

	ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence);
	radeon_ib_free(rdev, &ib);

	return 0;

error:
	ttm_eu_backoff_reservation(&ticket, &head);
	return r;
}

/**
 * radeon_vm_bo_set_addr - set bo's virtual address inside a vm
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to store the address
 * @soffset: requested offset of the buffer in the VM address space
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Set offset of @bo_va (cayman+).
 * Validate and set the offset requested within the vm address space.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved!
 */
int radeon_vm_bo_set_addr(struct radeon_device *rdev,
			  struct radeon_bo_va *bo_va,
			  uint64_t soffset,
			  uint32_t flags)
{
	uint64_t size = radeon_bo_size(bo_va->bo);
	struct radeon_vm *vm = bo_va->vm;
	unsigned last_pfn, pt_idx;
	uint64_t eoffset;
	int r;

	if (soffset) {
		/* make sure object fits at this offset */
		eoffset = soffset + size;
		if (soffset >= eoffset) {
			return -EINVAL;
		}

		last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
		if (last_pfn > rdev->vm_manager.max_pfn) {
			dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
				last_pfn, rdev->vm_manager.max_pfn);
			return -EINVAL;
		}

	} else {
		eoffset = last_pfn = 0;
	}

	mutex_lock(&vm->mutex);
	if (bo_va->it.start || bo_va->it.last) {
		if (bo_va->addr) {
			/* add a clone of the bo_va to clear the old address */
			struct radeon_bo_va *tmp;
			tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
			tmp->it.start = bo_va->it.start;
			tmp->it.last = bo_va->it.last;
			tmp->vm = vm;
			tmp->addr = bo_va->addr;
			list_add(&tmp->vm_status, &vm->freed);
		}

		interval_tree_remove(&bo_va->it, &vm->va);
		bo_va->it.start = 0;
		bo_va->it.last = 0;
	}

	soffset /= RADEON_GPU_PAGE_SIZE;
	eoffset /= RADEON_GPU_PAGE_SIZE;
	if (soffset || eoffset) {
		struct interval_tree_node *it;
		it = interval_tree_iter_first(&vm->va, soffset, eoffset - 1);
		if (it) {
			struct radeon_bo_va *tmp;
			tmp = container_of(it, struct radeon_bo_va, it);
			/* bo and tmp overlap, invalid offset */
			dev_err(rdev->dev, "bo %p va 0x%010Lx conflict with "
				"(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo,
				soffset, tmp->bo, tmp->it.start, tmp->it.last);
			mutex_unlock(&vm->mutex);
			return -EINVAL;
		}
		bo_va->it.start = soffset;
		bo_va->it.last = eoffset - 1;
		interval_tree_insert(&bo_va->it, &vm->va);
	}
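
	/*
	 * The interval tree above stores inclusive GPU page numbers: for
	 * example, mapping an 8 KiB BO at VA 0x100000 records
	 * it.start = 0x100 and it.last = 0x101.
	 */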

	bo_va->flags = flags;
	bo_va->addr = 0;

	soffset >>= radeon_vm_block_size;
	eoffset >>= radeon_vm_block_size;

	BUG_ON(eoffset >= radeon_vm_num_pdes(rdev));

	if (eoffset > vm->max_pde_used)
		vm->max_pde_used = eoffset;

	radeon_bo_unreserve(bo_va->bo);

	/* walk over the address space and allocate the page tables */
	for (pt_idx = soffset; pt_idx <= eoffset; ++pt_idx) {
		struct radeon_bo *pt;

		if (vm->page_tables[pt_idx].bo)
			continue;

		/* drop mutex to allocate and clear page table */
		mutex_unlock(&vm->mutex);

		r = radeon_bo_create(rdev, RADEON_VM_PTE_COUNT * 8,
				     RADEON_GPU_PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_VRAM, 0, NULL, &pt);
		if (r)
			return r;

		r = radeon_vm_clear_bo(rdev, pt);
		if (r) {
			radeon_bo_unref(&pt);
			radeon_bo_reserve(bo_va->bo, false);
			return r;
		}

		/* acquire mutex again */
		mutex_lock(&vm->mutex);
		if (vm->page_tables[pt_idx].bo) {
			/* someone else allocated the pt in the meantime */
			mutex_unlock(&vm->mutex);
			radeon_bo_unref(&pt);
			mutex_lock(&vm->mutex);
			continue;
		}

		vm->page_tables[pt_idx].addr = 0;
		vm->page_tables[pt_idx].bo = pt;
	}

	mutex_unlock(&vm->mutex);
	return radeon_bo_reserve(bo_va->bo, false);
}

/**
 * radeon_vm_map_gart - get the physical address of a gart page
 *
 * @rdev: radeon_device pointer
 * @addr: the unmapped addr
 *
 * Look up the physical address of the page that the pte resolves
 * to (cayman+).
 * Returns the physical address of the page.
 */
uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
{
	uint64_t result;

	/* page table offset */
	result = rdev->gart.pages_addr[addr >> PAGE_SHIFT];

	/* in case cpu page size != gpu page size */
	result |= addr & (~PAGE_MASK);

	return result;
}

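/*
 * Illustration (made-up address): with 4 KiB CPU pages, addr 0x12345 reads
 * pages_addr[0x12] and ORs in the byte offset 0x345, which keeps the mapping
 * correct when the CPU page size differs from the GPU page size.
 */
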
/**
 * radeon_vm_page_flags - translate page flags to what the hw uses
 *
 * @flags: flags coming from userspace
 *
 * Translate the flags the userspace ABI uses to hw flags.
 */
static uint32_t radeon_vm_page_flags(uint32_t flags)
{
	uint32_t hw_flags = 0;
	hw_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
	hw_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
	hw_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
	if (flags & RADEON_VM_PAGE_SYSTEM) {
		hw_flags |= R600_PTE_SYSTEM;
		hw_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
	}
	return hw_flags;
}

/**
 * radeon_vm_update_page_directory - make sure that page directory is valid
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Updates the page directory entries to point at the currently
 * allocated page tables (cayman+).
 * Returns 0 for success, error for failure.
 *
 * Global and local mutex must be locked!
 */
int radeon_vm_update_page_directory(struct radeon_device *rdev,
				    struct radeon_vm *vm)
{
	struct radeon_bo *pd = vm->page_directory;
	uint64_t pd_addr = radeon_bo_gpu_offset(pd);
	uint32_t incr = RADEON_VM_PTE_COUNT * 8;
	uint64_t last_pde = ~0, last_pt = ~0;
	unsigned count = 0, pt_idx, ndw;
	struct radeon_ib ib;
	int r;

	/* padding, etc. */
	ndw = 64;

	/* assume the worst case */
	ndw += vm->max_pde_used * 16;

	/* update too big for an IB */
	if (ndw > 0xfffff)
		return -ENOMEM;

	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
	if (r)
		return r;
	ib.length_dw = 0;

	/* walk over the address space and update the page directory */
	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
		struct radeon_bo *bo = vm->page_tables[pt_idx].bo;
		uint64_t pde, pt;

		if (bo == NULL)
			continue;

		pt = radeon_bo_gpu_offset(bo);
		if (vm->page_tables[pt_idx].addr == pt)
			continue;
		vm->page_tables[pt_idx].addr = pt;

		pde = pd_addr + pt_idx * 8;
		if (((last_pde + 8 * count) != pde) ||
		    ((last_pt + incr * count) != pt)) {

			if (count) {
				radeon_asic_vm_set_page(rdev, &ib, last_pde,
							last_pt, count, incr,
							R600_PTE_VALID);
			}

			count = 1;
			last_pde = pde;
			last_pt = pt;
		} else {
			++count;
		}
	}

	if (count)
		radeon_asic_vm_set_page(rdev, &ib, last_pde, last_pt, count,
					incr, R600_PTE_VALID);

	if (ib.length_dw != 0) {
		radeon_semaphore_sync_to(ib.semaphore, pd->tbo.sync_obj);
		radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use);
		r = radeon_ib_schedule(rdev, &ib, NULL);
		if (r) {
			radeon_ib_free(rdev, &ib);
			return r;
		}
		radeon_fence_unref(&vm->fence);
		vm->fence = radeon_fence_ref(ib.fence);
		radeon_fence_unref(&vm->last_flush);
	}
	radeon_ib_free(rdev, &ib);

	return 0;
}

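/*
 * The loop above batches directory updates: "count" consecutive PDEs whose
 * page tables are also laid out back to back (incr = one page table apart)
 * are written with a single radeon_asic_vm_set_page() call.
 */
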
/**
 * radeon_vm_frag_ptes - add fragment information to PTEs
 *
 * @rdev: radeon_device pointer
 * @ib: IB for the update
 * @pe_start: first PTE to handle
 * @pe_end: last PTE to handle
 * @addr: addr those PTEs should point to
 * @flags: hw mapping flags
 *
 * Global and local mutex must be locked!
 */
static void radeon_vm_frag_ptes(struct radeon_device *rdev,
				struct radeon_ib *ib,
				uint64_t pe_start, uint64_t pe_end,
				uint64_t addr, uint32_t flags)
{
	/**
	 * The MC L1 TLB supports variable sized pages, based on a fragment
	 * field in the PTE. When this field is set to a non-zero value, page
	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
	 * flags are considered valid for all PTEs within the fragment range
	 * and corresponding mappings are assumed to be physically contiguous.
	 *
	 * The L1 TLB can store a single PTE for the whole fragment,
	 * significantly increasing the space available for translation
	 * caching. This leads to large improvements in throughput when the
	 * TLB is under pressure.
	 *
	 * The L2 TLB distributes small and large fragments into two
	 * asymmetric partitions. The large fragment cache is significantly
	 * larger. Thus, we try to use large fragments wherever possible.
	 * Userspace can support this by aligning virtual base address and
	 * allocation size to the fragment size.
	 */

	/* NI is optimized for 256KB fragments, SI and newer for 64KB */
	uint64_t frag_flags = rdev->family == CHIP_CAYMAN ?
			R600_PTE_FRAG_256KB : R600_PTE_FRAG_64KB;
	uint64_t frag_align = rdev->family == CHIP_CAYMAN ? 0x200 : 0x80;

	uint64_t frag_start = ALIGN(pe_start, frag_align);
	uint64_t frag_end = pe_end & ~(frag_align - 1);

	unsigned count;

	/* system pages are not physically contiguous */
	if ((flags & R600_PTE_SYSTEM) || !(flags & R600_PTE_VALID) ||
	    (frag_start >= frag_end)) {

		count = (pe_end - pe_start) / 8;
		radeon_asic_vm_set_page(rdev, ib, pe_start, addr, count,
					RADEON_GPU_PAGE_SIZE, flags);
		return;
	}

	/* handle the 4K area at the beginning */
	if (pe_start != frag_start) {
		count = (frag_start - pe_start) / 8;
		radeon_asic_vm_set_page(rdev, ib, pe_start, addr, count,
					RADEON_GPU_PAGE_SIZE, flags);
		addr += RADEON_GPU_PAGE_SIZE * count;
	}

	/* handle the area in the middle */
	count = (frag_end - frag_start) / 8;
	radeon_asic_vm_set_page(rdev, ib, frag_start, addr, count,
				RADEON_GPU_PAGE_SIZE, flags | frag_flags);

	/* handle the 4K area at the end */
	if (frag_end != pe_end) {
		addr += RADEON_GPU_PAGE_SIZE * count;
		count = (pe_end - frag_end) / 8;
		radeon_asic_vm_set_page(rdev, ib, frag_end, addr, count,
					RADEON_GPU_PAGE_SIZE, flags);
	}
}

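/*
 * Sanity check on the constants above: frag_align is in PTE bytes, so 0x80
 * bytes = 16 PTEs = 64 KiB of virtual address space at 4 KiB pages, and
 * 0x200 bytes = 64 PTEs = 256 KiB for Cayman, matching the fragment flags.
 */
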
/**
 * radeon_vm_update_ptes - make sure that page tables are valid
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 * @ib: indirect buffer to fill with the update commands
 * @start: start of GPU address range
 * @end: end of GPU address range
 * @dst: destination address to map to
 * @flags: mapping flags
 *
 * Update the page tables in the range @start - @end (cayman+).
 *
 * Global and local mutex must be locked!
 */
static void radeon_vm_update_ptes(struct radeon_device *rdev,
				  struct radeon_vm *vm,
				  struct radeon_ib *ib,
				  uint64_t start, uint64_t end,
				  uint64_t dst, uint32_t flags)
{
	uint64_t mask = RADEON_VM_PTE_COUNT - 1;
	uint64_t last_pte = ~0, last_dst = ~0;
	unsigned count = 0;
	uint64_t addr;

	/* walk over the address space and update the page tables */
	for (addr = start; addr < end; ) {
		uint64_t pt_idx = addr >> radeon_vm_block_size;
		struct radeon_bo *pt = vm->page_tables[pt_idx].bo;
		unsigned nptes;
		uint64_t pte;

		radeon_semaphore_sync_to(ib->semaphore, pt->tbo.sync_obj);

		if ((addr & ~mask) == (end & ~mask))
			nptes = end - addr;
		else
			nptes = RADEON_VM_PTE_COUNT - (addr & mask);

		pte = radeon_bo_gpu_offset(pt);
		pte += (addr & mask) * 8;

		if ((last_pte + 8 * count) != pte) {

			if (count) {
				radeon_vm_frag_ptes(rdev, ib, last_pte,
						    last_pte + 8 * count,
						    last_dst, flags);
			}

			count = nptes;
			last_pte = pte;
			last_dst = dst;
		} else {
			count += nptes;
		}

		addr += nptes;
		dst += nptes * RADEON_GPU_PAGE_SIZE;
	}

	if (count) {
		radeon_vm_frag_ptes(rdev, ib, last_pte,
				    last_pte + 8 * count,
				    last_dst, flags);
	}
}

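/*
 * Address split example (block size 9): GPU page number 0x12345 selects page
 * table pt_idx = 0x12345 >> 9 = 0x91 and PTE slot 0x12345 & 0x1ff = 0x145
 * within that table.
 */
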
/**
 * radeon_vm_bo_update - map a bo into the vm page table
 *
 * @rdev: radeon_device pointer
 * @bo_va: requested bo_va
 * @mem: ttm mem
 *
 * Fill in the page table entries for @bo_va (cayman+).
 * Returns 0 for success, -EINVAL for failure.
 *
 * Object has to be reserved and mutex must be locked!
 */
int radeon_vm_bo_update(struct radeon_device *rdev,
			struct radeon_bo_va *bo_va,
			struct ttm_mem_reg *mem)
{
	struct radeon_vm *vm = bo_va->vm;
	struct radeon_ib ib;
	unsigned nptes, ndw;
	uint64_t addr;
	int r;

	if (!bo_va->it.start) {
		dev_err(rdev->dev, "bo %p don't has a mapping in vm %p\n",
			bo_va->bo, vm);
		return -EINVAL;
	}

	list_del_init(&bo_va->vm_status);

	bo_va->flags &= ~RADEON_VM_PAGE_VALID;
	bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
	bo_va->flags &= ~RADEON_VM_PAGE_SNOOPED;
	if (mem) {
		addr = mem->start << PAGE_SHIFT;
		if (mem->mem_type != TTM_PL_SYSTEM) {
			bo_va->flags |= RADEON_VM_PAGE_VALID;
		}
		if (mem->mem_type == TTM_PL_TT) {
			bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
			if (!(bo_va->bo->flags & (RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC)))
				bo_va->flags |= RADEON_VM_PAGE_SNOOPED;

		} else {
			addr += rdev->vm_manager.vram_base_offset;
		}
	} else {
		addr = 0;
	}

	if (addr == bo_va->addr)
		return 0;
	bo_va->addr = addr;

	trace_radeon_vm_bo_update(bo_va);

	nptes = bo_va->it.last - bo_va->it.start + 1;

	/* padding, etc. */
	ndw = 64;

	if (radeon_vm_block_size > 11)
		/* reserve space for one header for every 2k dwords */
		ndw += (nptes >> 11) * 4;
	else
		/* reserve space for one header for
		   every (1 << BLOCK_SIZE) entries */
		ndw += (nptes >> radeon_vm_block_size) * 4;

	/* reserve space for pte addresses */
	ndw += nptes * 2;

	/* update too big for an IB */
	if (ndw > 0xfffff)
		return -ENOMEM;

	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
	if (r)
		return r;
	ib.length_dw = 0;

	radeon_vm_update_ptes(rdev, vm, &ib, bo_va->it.start,
			      bo_va->it.last + 1, addr,
			      radeon_vm_page_flags(bo_va->flags));

	radeon_semaphore_sync_to(ib.semaphore, vm->fence);
	r = radeon_ib_schedule(rdev, &ib, NULL);
	if (r) {
		radeon_ib_free(rdev, &ib);
		return r;
	}
	radeon_fence_unref(&vm->fence);
	vm->fence = radeon_fence_ref(ib.fence);
	radeon_ib_free(rdev, &ib);
	radeon_fence_unref(&vm->last_flush);

	return 0;
}

/**
 * radeon_vm_clear_freed - clear freed BOs in the PT
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Make sure all freed BOs are cleared in the PT.
 * Returns 0 for success.
 *
 * PTs have to be reserved and mutex must be locked!
 */
int radeon_vm_clear_freed(struct radeon_device *rdev,
			  struct radeon_vm *vm)
{
	struct radeon_bo_va *bo_va, *tmp;
	int r;

	list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
		r = radeon_vm_bo_update(rdev, bo_va, NULL);
		kfree(bo_va);
		if (r)
			return r;
	}
	return 0;

}

/**
 * radeon_vm_clear_invalids - clear invalidated BOs in the PT
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Make sure all invalidated BOs are cleared in the PT.
 * Returns 0 for success.
 *
 * PTs have to be reserved and mutex must be locked!
 */
int radeon_vm_clear_invalids(struct radeon_device *rdev,
			     struct radeon_vm *vm)
{
	struct radeon_bo_va *bo_va, *tmp;
	int r;

	list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, vm_status) {
		r = radeon_vm_bo_update(rdev, bo_va, NULL);
		if (r)
			return r;
	}
	return 0;
}

/**
 * radeon_vm_bo_rmv - remove a bo from a specific vm
 *
 * @rdev: radeon_device pointer
 * @bo_va: requested bo_va
 *
 * Remove @bo_va->bo from the requested vm (cayman+).
 *
 * Object has to be reserved!
 */
void radeon_vm_bo_rmv(struct radeon_device *rdev,
		      struct radeon_bo_va *bo_va)
{
	struct radeon_vm *vm = bo_va->vm;

	list_del(&bo_va->bo_list);

	mutex_lock(&vm->mutex);
	interval_tree_remove(&bo_va->it, &vm->va);
	list_del(&bo_va->vm_status);

	if (bo_va->addr) {
		bo_va->bo = NULL;
		list_add(&bo_va->vm_status, &vm->freed);
	} else {
		kfree(bo_va);
	}

	mutex_unlock(&vm->mutex);
}
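
/*
 * Note: if the unmapped bo_va still had live page table entries (addr != 0),
 * it is kept on vm->freed above so that radeon_vm_clear_freed() can clear
 * those PTEs before the bo_va is finally freed.
 */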

/**
 * radeon_vm_bo_invalidate - mark the bo as invalid
 *
 * @rdev: radeon_device pointer
 * @bo: radeon buffer object
 *
 * Mark @bo as invalid (cayman+).
 */
void radeon_vm_bo_invalidate(struct radeon_device *rdev,
			     struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		if (bo_va->addr) {
			mutex_lock(&bo_va->vm->mutex);
			list_del(&bo_va->vm_status);
			list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
			mutex_unlock(&bo_va->vm->mutex);
		}
	}
}

/**
 * radeon_vm_init - initialize a vm instance
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Init @vm fields (cayman+).
 */
int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
{
	const unsigned align = min(RADEON_VM_PTB_ALIGN_SIZE,
				   RADEON_VM_PTE_COUNT * 8);
	unsigned pd_size, pd_entries, pts_size;
	int r;

	vm->id = 0;
	vm->ib_bo_va = NULL;
	vm->fence = NULL;
	vm->last_flush = NULL;
	vm->last_id_use = NULL;
	mutex_init(&vm->mutex);
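	/*
	 * vm->va is the root of the per-VM interval tree this patch
	 * introduces; mappings are indexed by their GPU page range instead
	 * of being kept on a plain list.
	 */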
	vm->va = RB_ROOT;
	INIT_LIST_HEAD(&vm->invalidated);
	INIT_LIST_HEAD(&vm->freed);

	pd_size = radeon_vm_directory_size(rdev);
	pd_entries = radeon_vm_num_pdes(rdev);

	/* allocate page table array */
	pts_size = pd_entries * sizeof(struct radeon_vm_pt);
	vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
	if (vm->page_tables == NULL) {
		DRM_ERROR("Cannot allocate memory for page table array\n");
		return -ENOMEM;
	}

	r = radeon_bo_create(rdev, pd_size, align, true,
			     RADEON_GEM_DOMAIN_VRAM, 0, NULL,
			     &vm->page_directory);
	if (r)
		return r;

	r = radeon_vm_clear_bo(rdev, vm->page_directory);
	if (r) {
		radeon_bo_unref(&vm->page_directory);
		vm->page_directory = NULL;
		return r;
	}

	return 0;
}

/**
 * radeon_vm_fini - tear down a vm instance
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Tear down @vm (cayman+).
 * Unbind the VM and remove all bos from the vm bo list
 */
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
{
	struct radeon_bo_va *bo_va, *tmp;
	int i, r;

	if (!RB_EMPTY_ROOT(&vm->va)) {
		dev_err(rdev->dev, "still active bo inside vm\n");
	}
	rbtree_postorder_for_each_entry_safe(bo_va, tmp, &vm->va, it.rb) {
		interval_tree_remove(&bo_va->it, &vm->va);
		r = radeon_bo_reserve(bo_va->bo, false);
		if (!r) {
			list_del_init(&bo_va->bo_list);
			radeon_bo_unreserve(bo_va->bo);
			kfree(bo_va);
		}
	}
	list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status)
		kfree(bo_va);

	for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
		radeon_bo_unref(&vm->page_tables[i].bo);
	kfree(vm->page_tables);

	radeon_bo_unref(&vm->page_directory);

	radeon_fence_unref(&vm->fence);
	radeon_fence_unref(&vm->last_flush);
	radeon_fence_unref(&vm->last_id_use);

	mutex_destroy(&vm->mutex);
}