drm/radeon: Allow write-combined CPU mappings of BOs in GTT (v2)
[deliverable/linux.git] / drivers/gpu/drm/radeon/radeon_vm.c
1 /*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28 #include <drm/drmP.h>
29 #include <drm/radeon_drm.h>
30 #include "radeon.h"
31 #include "radeon_trace.h"
32
33 /*
34 * GPUVM
35 * GPUVM is similar to the legacy gart on older asics; however,
36 * rather than there being a single global gart table
37 * for the entire GPU, there are multiple VM page tables active
38 * at any given time. The VM page tables can contain a mix of
39 * vram pages and system memory pages, and system memory pages
40 * can be mapped as snooped (cached system pages) or unsnooped
41 * (uncached system pages).
42 * Each VM has an ID associated with it and there is a page table
43 * associated with each VMID. When executing a command buffer,
44 * the kernel tells the ring what VMID to use for that command
45 * buffer. VMIDs are allocated dynamically as commands are submitted.
46 * The userspace drivers maintain their own address space and the kernel
47 * sets up their page tables accordingly when they submit their
48 * command buffers and a VMID is assigned.
49 * Cayman/Trinity support up to 8 active VMs at any given time;
50 * SI supports 16.
51 */
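
/*
 * Illustrative address-space math (an added sketch, not from the original
 * source): with RADEON_GPU_PAGE_SIZE of 4KB and an assumed radeon_vm_block_size
 * of 9, each page table covers (1 << 9) * 4KB = 2MB of VM address space, and a
 * GPU virtual address decomposes roughly as
 *
 *   pfn     = gpu_addr / RADEON_GPU_PAGE_SIZE;
 *   pde_idx = pfn >> radeon_vm_block_size;        (selects the page table)
 *   pte_idx = pfn & (RADEON_VM_PTE_COUNT - 1);    (entry within that table)
 *
 * which mirrors the index calculations used by radeon_vm_bo_set_addr() and
 * radeon_vm_update_ptes() below.
 */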
52
53 /**
54 * radeon_vm_num_pdes - return the number of page directory entries
55 *
56 * @rdev: radeon_device pointer
57 *
58 * Calculate the number of page directory entries (cayman+).
59 */
60 static unsigned radeon_vm_num_pdes(struct radeon_device *rdev)
61 {
62 return rdev->vm_manager.max_pfn >> radeon_vm_block_size;
63 }
64
65 /**
66 * radeon_vm_directory_size - returns the size of the page directory in bytes
67 *
68 * @rdev: radeon_device pointer
69 *
70 * Calculate the size of the page directory in bytes (cayman+).
71 */
72 static unsigned radeon_vm_directory_size(struct radeon_device *rdev)
73 {
74 return RADEON_GPU_PAGE_ALIGN(radeon_vm_num_pdes(rdev) * 8);
75 }
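
/*
 * Worked example (illustrative, assuming a 4GB VM address space and a block
 * size of 9): max_pfn = 4GB / 4KB = 1M pages, so radeon_vm_num_pdes() returns
 * 1M >> 9 = 2048 directory entries and radeon_vm_directory_size() returns
 * RADEON_GPU_PAGE_ALIGN(2048 * 8) = 16KB.
 */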
76
77 /**
78 * radeon_vm_manager_init - init the vm manager
79 *
80 * @rdev: radeon_device pointer
81 *
82 * Init the vm manager (cayman+).
83 * Returns 0 for success, error for failure.
84 */
85 int radeon_vm_manager_init(struct radeon_device *rdev)
86 {
87 int r;
88
89 if (!rdev->vm_manager.enabled) {
90 r = radeon_asic_vm_init(rdev);
91 if (r)
92 return r;
93
94 rdev->vm_manager.enabled = true;
95 }
96 return 0;
97 }
98
99 /**
100 * radeon_vm_manager_fini - tear down the vm manager
101 *
102 * @rdev: radeon_device pointer
103 *
104 * Tear down the VM manager (cayman+).
105 */
106 void radeon_vm_manager_fini(struct radeon_device *rdev)
107 {
108 int i;
109
110 if (!rdev->vm_manager.enabled)
111 return;
112
113 for (i = 0; i < RADEON_NUM_VM; ++i)
114 radeon_fence_unref(&rdev->vm_manager.active[i]);
115 radeon_asic_vm_fini(rdev);
116 rdev->vm_manager.enabled = false;
117 }
118
119 /**
120 * radeon_vm_get_bos - add the vm BOs to a validation list
121 *
122 * @rdev: radeon_device pointer
 * @vm: vm providing the BOs
123 * @head: head of validation list
124 *
125 * Add the page directory to the list of BOs to
126 * validate for command submission (cayman+).
127 */
128 struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
129 struct radeon_vm *vm,
130 struct list_head *head)
131 {
132 struct radeon_cs_reloc *list;
133 unsigned i, idx;
134
135 list = kmalloc_array(vm->max_pde_used + 2,
136 sizeof(struct radeon_cs_reloc), GFP_KERNEL);
137 if (!list)
138 return NULL;
139
140 /* add the vm page table to the list */
141 list[0].gobj = NULL;
142 list[0].robj = vm->page_directory;
143 list[0].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
144 list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
145 list[0].tv.bo = &vm->page_directory->tbo;
146 list[0].tiling_flags = 0;
147 list[0].handle = 0;
148 list_add(&list[0].tv.head, head);
149
150 for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {
151 if (!vm->page_tables[i].bo)
152 continue;
153
154 list[idx].gobj = NULL;
155 list[idx].robj = vm->page_tables[i].bo;
156 list[idx].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
157 list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
158 list[idx].tv.bo = &list[idx].robj->tbo;
159 list[idx].tiling_flags = 0;
160 list[idx].handle = 0;
161 list_add(&list[idx++].tv.head, head);
162 }
163
164 return list;
165 }
166
167 /**
168 * radeon_vm_grab_id - allocate the next free VMID
169 *
170 * @rdev: radeon_device pointer
171 * @vm: vm to allocate id for
172 * @ring: ring we want to submit job to
173 *
174 * Allocate an id for the vm (cayman+).
175 * Returns the fence we need to sync to (if any).
176 *
177 * Global and local mutex must be locked!
178 */
179 struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
180 struct radeon_vm *vm, int ring)
181 {
182 struct radeon_fence *best[RADEON_NUM_RINGS] = {};
183 unsigned choices[2] = {};
184 unsigned i;
185
186 /* check if the id is still valid */
187 if (vm->last_id_use && vm->last_id_use == rdev->vm_manager.active[vm->id])
188 return NULL;
189
190 /* we definitely need to flush */
191 radeon_fence_unref(&vm->last_flush);
192
193 /* skip over VMID 0, since it is the system VM */
194 for (i = 1; i < rdev->vm_manager.nvm; ++i) {
195 struct radeon_fence *fence = rdev->vm_manager.active[i];
196
197 if (fence == NULL) {
198 /* found a free one */
199 vm->id = i;
200 trace_radeon_vm_grab_id(vm->id, ring);
201 return NULL;
202 }
203
204 if (radeon_fence_is_earlier(fence, best[fence->ring])) {
205 best[fence->ring] = fence;
206 choices[fence->ring == ring ? 0 : 1] = i;
207 }
208 }
209
210 for (i = 0; i < 2; ++i) {
211 if (choices[i]) {
212 vm->id = choices[i];
213 trace_radeon_vm_grab_id(vm->id, ring);
214 return rdev->vm_manager.active[choices[i]];
215 }
216 }
217
218 /* should never happen */
219 BUG();
220 return NULL;
221 }
222
223 /**
224 * radeon_vm_flush - hardware flush the vm
225 *
226 * @rdev: radeon_device pointer
227 * @vm: vm we want to flush
228 * @ring: ring to use for flush
229 *
230 * Flush the vm (cayman+).
231 *
232 * Global and local mutex must be locked!
233 */
234 void radeon_vm_flush(struct radeon_device *rdev,
235 struct radeon_vm *vm,
236 int ring)
237 {
238 uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory);
239
240 /* if we can't remember our last VM flush then flush now! */
241 /* XXX figure out why we have to flush all the time */
242 if (!vm->last_flush || true || pd_addr != vm->pd_gpu_addr) {
243 vm->pd_gpu_addr = pd_addr;
244 radeon_ring_vm_flush(rdev, ring, vm);
245 }
246 }
247
248 /**
249 * radeon_vm_fence - remember fence for vm
250 *
251 * @rdev: radeon_device pointer
252 * @vm: vm we want to fence
253 * @fence: fence to remember
254 *
255 * Fence the vm (cayman+).
256 * Set the fence used to protect page table and id.
257 *
258 * Global and local mutex must be locked!
259 */
260 void radeon_vm_fence(struct radeon_device *rdev,
261 struct radeon_vm *vm,
262 struct radeon_fence *fence)
263 {
264 radeon_fence_unref(&vm->fence);
265 vm->fence = radeon_fence_ref(fence);
266
267 radeon_fence_unref(&rdev->vm_manager.active[vm->id]);
268 rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence);
269
270 radeon_fence_unref(&vm->last_id_use);
271 vm->last_id_use = radeon_fence_ref(fence);
272
273 /* we just flushed the VM, remember that */
274 if (!vm->last_flush)
275 vm->last_flush = radeon_fence_ref(fence);
276 }
277
278 /**
279 * radeon_vm_bo_find - find the bo_va for a specific vm & bo
280 *
281 * @vm: requested vm
282 * @bo: requested buffer object
283 *
284 * Find @bo inside the requested vm (cayman+).
285 * Search inside the @bo's vm list for the requested vm.
286 * Returns the found bo_va or NULL if none is found.
287 *
288 * Object has to be reserved!
289 */
290 struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
291 struct radeon_bo *bo)
292 {
293 struct radeon_bo_va *bo_va;
294
295 list_for_each_entry(bo_va, &bo->va, bo_list) {
296 if (bo_va->vm == vm) {
297 return bo_va;
298 }
299 }
300 return NULL;
301 }
302
303 /**
304 * radeon_vm_bo_add - add a bo to a specific vm
305 *
306 * @rdev: radeon_device pointer
307 * @vm: requested vm
308 * @bo: radeon buffer object
309 *
310 * Add @bo into the requested vm (cayman+).
311 * Add @bo to the list of bos associated with the vm
312 * Returns newly added bo_va or NULL for failure
313 *
314 * Object has to be reserved!
315 */
316 struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
317 struct radeon_vm *vm,
318 struct radeon_bo *bo)
319 {
320 struct radeon_bo_va *bo_va;
321
322 bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
323 if (bo_va == NULL) {
324 return NULL;
325 }
326 bo_va->vm = vm;
327 bo_va->bo = bo;
328 bo_va->soffset = 0;
329 bo_va->eoffset = 0;
330 bo_va->flags = 0;
331 bo_va->valid = false;
332 bo_va->ref_count = 1;
333 INIT_LIST_HEAD(&bo_va->bo_list);
334 INIT_LIST_HEAD(&bo_va->vm_list);
335 INIT_LIST_HEAD(&bo_va->vm_status);
336
337 mutex_lock(&vm->mutex);
338 list_add(&bo_va->vm_list, &vm->va);
339 list_add_tail(&bo_va->bo_list, &bo->va);
340 mutex_unlock(&vm->mutex);
341
342 return bo_va;
343 }
344
345 /**
346 * radeon_vm_clear_bo - initially clear the page dir/table
347 *
348 * @rdev: radeon_device pointer
349 * @bo: bo to clear
350 */
351 static int radeon_vm_clear_bo(struct radeon_device *rdev,
352 struct radeon_bo *bo)
353 {
354 struct ttm_validate_buffer tv;
355 struct ww_acquire_ctx ticket;
356 struct list_head head;
357 struct radeon_ib ib;
358 unsigned entries;
359 uint64_t addr;
360 int r;
361
362 memset(&tv, 0, sizeof(tv));
363 tv.bo = &bo->tbo;
364
365 INIT_LIST_HEAD(&head);
366 list_add(&tv.head, &head);
367
368 r = ttm_eu_reserve_buffers(&ticket, &head);
369 if (r)
370 return r;
371
372 r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
373 if (r)
374 goto error;
375
376 addr = radeon_bo_gpu_offset(bo);
377 entries = radeon_bo_size(bo) / 8;
378
379 r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib,
380 NULL, entries * 2 + 64);
381 if (r)
382 goto error;
383
384 ib.length_dw = 0;
385
386 radeon_asic_vm_set_page(rdev, &ib, addr, 0, entries, 0, 0);
387
388 r = radeon_ib_schedule(rdev, &ib, NULL);
389 if (r)
390 goto error;
391
392 ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence);
393 radeon_ib_free(rdev, &ib);
394
395 return 0;
396
397 error:
398 ttm_eu_backoff_reservation(&ticket, &head);
399 return r;
400 }
401
402 /**
403 * radeon_vm_bo_set_addr - set the bo's virtual address inside a vm
404 *
405 * @rdev: radeon_device pointer
406 * @bo_va: bo_va to store the address
407 * @soffset: requested offset of the buffer in the VM address space
408 * @flags: attributes of pages (read/write/valid/etc.)
409 *
410 * Set offset of @bo_va (cayman+).
411 * Validate and set the offset requested within the vm address space.
412 * Returns 0 for success, error for failure.
413 *
414 * Object has to be reserved!
415 */
416 int radeon_vm_bo_set_addr(struct radeon_device *rdev,
417 struct radeon_bo_va *bo_va,
418 uint64_t soffset,
419 uint32_t flags)
420 {
421 uint64_t size = radeon_bo_size(bo_va->bo);
422 uint64_t eoffset, last_offset = 0;
423 struct radeon_vm *vm = bo_va->vm;
424 struct radeon_bo_va *tmp;
425 struct list_head *head;
426 unsigned last_pfn, pt_idx;
427 int r;
428
429 if (soffset) {
430 /* make sure the object fits at this offset */
431 eoffset = soffset + size;
432 if (soffset >= eoffset) {
433 return -EINVAL;
434 }
435
436 last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
437 if (last_pfn > rdev->vm_manager.max_pfn) {
438 dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
439 last_pfn, rdev->vm_manager.max_pfn);
440 return -EINVAL;
441 }
442
443 } else {
444 eoffset = last_pfn = 0;
445 }
446
447 mutex_lock(&vm->mutex);
448 head = &vm->va;
449 last_offset = 0;
450 list_for_each_entry(tmp, &vm->va, vm_list) {
451 if (bo_va == tmp) {
452 /* skip over currently modified bo */
453 continue;
454 }
455
456 if (soffset >= last_offset && eoffset <= tmp->soffset) {
457 /* bo can be added before this one */
458 break;
459 }
460 if (eoffset > tmp->soffset && soffset < tmp->eoffset) {
461 /* bo and tmp overlap, invalid offset */
462 dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n",
463 bo_va->bo, (unsigned)bo_va->soffset, tmp->bo,
464 (unsigned)tmp->soffset, (unsigned)tmp->eoffset);
465 mutex_unlock(&vm->mutex);
466 return -EINVAL;
467 }
468 last_offset = tmp->eoffset;
469 head = &tmp->vm_list;
470 }
471
472 if (bo_va->soffset) {
473 /* add a clone of the bo_va to clear the old address */
474 tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
475 if (!tmp) {
476 mutex_unlock(&vm->mutex);
477 return -ENOMEM;
478 }
479 tmp->soffset = bo_va->soffset;
480 tmp->eoffset = bo_va->eoffset;
481 tmp->vm = vm;
482 list_add(&tmp->vm_status, &vm->freed);
483 }
484
485 bo_va->soffset = soffset;
486 bo_va->eoffset = eoffset;
487 bo_va->flags = flags;
488 bo_va->valid = false;
489 list_move(&bo_va->vm_list, head);
490
491 soffset = (soffset / RADEON_GPU_PAGE_SIZE) >> radeon_vm_block_size;
492 eoffset = (eoffset / RADEON_GPU_PAGE_SIZE) >> radeon_vm_block_size;
493
494 BUG_ON(eoffset >= radeon_vm_num_pdes(rdev));
495
496 if (eoffset > vm->max_pde_used)
497 vm->max_pde_used = eoffset;
498
499 radeon_bo_unreserve(bo_va->bo);
500
501 /* walk over the address space and allocate the page tables */
502 for (pt_idx = soffset; pt_idx <= eoffset; ++pt_idx) {
503 struct radeon_bo *pt;
504
505 if (vm->page_tables[pt_idx].bo)
506 continue;
507
508 /* drop mutex to allocate and clear page table */
509 mutex_unlock(&vm->mutex);
510
511 r = radeon_bo_create(rdev, RADEON_VM_PTE_COUNT * 8,
512 RADEON_GPU_PAGE_SIZE, true,
513 RADEON_GEM_DOMAIN_VRAM, 0, NULL, &pt);
514 if (r)
515 return r;
516
517 r = radeon_vm_clear_bo(rdev, pt);
518 if (r) {
519 radeon_bo_unref(&pt);
520 radeon_bo_reserve(bo_va->bo, false);
521 return r;
522 }
523
524 /* acquire the mutex again */
525 mutex_lock(&vm->mutex);
526 if (vm->page_tables[pt_idx].bo) {
527 /* someone else allocated the pt in the meantime */
528 mutex_unlock(&vm->mutex);
529 radeon_bo_unref(&pt);
530 mutex_lock(&vm->mutex);
531 continue;
532 }
533
534 vm->page_tables[pt_idx].addr = 0;
535 vm->page_tables[pt_idx].bo = pt;
536 }
537
538 mutex_unlock(&vm->mutex);
539 return radeon_bo_reserve(bo_va->bo, false);
540 }
541
542 /**
543 * radeon_vm_map_gart - get the physical address of a gart page
544 *
545 * @rdev: radeon_device pointer
546 * @addr: the unmapped addr
547 *
548 * Look up the physical address of the page that the pte resolves
549 * to (cayman+).
550 * Returns the physical address of the page.
551 */
552 uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
553 {
554 uint64_t result;
555
556 /* page table offset */
557 result = rdev->gart.pages_addr[addr >> PAGE_SHIFT];
558
559 /* in case cpu page size != gpu page size */
560 result |= addr & (~PAGE_MASK);
561
562 return result;
563 }
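
/*
 * Illustrative example (assuming a 64KB CPU page size, as on some ppc64
 * configs): one system page then backs 16 GPU pages.  For addr = 0x5000,
 * addr >> PAGE_SHIFT is 0, so the DMA address of the first system page is
 * used, and (addr & ~PAGE_MASK) = 0x5000 selects the right 4KB slice
 * inside it.
 */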
564
565 /**
566 * radeon_vm_page_flags - translate page flags to what the hw uses
567 *
568 * @flags: flags coming from userspace
569 *
570 * Translate the flags the userspace ABI uses to hw flags.
571 */
572 static uint32_t radeon_vm_page_flags(uint32_t flags)
573 {
574 uint32_t hw_flags = 0;
575 hw_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
576 hw_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
577 hw_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
578 if (flags & RADEON_VM_PAGE_SYSTEM) {
579 hw_flags |= R600_PTE_SYSTEM;
580 hw_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
581 }
582 return hw_flags;
583 }
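
/*
 * For example, a snooped GTT mapping requested with
 * RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_SYSTEM |
 * RADEON_VM_PAGE_SNOOPED translates to
 * R600_PTE_VALID | R600_PTE_READABLE | R600_PTE_SYSTEM | R600_PTE_SNOOPED,
 * while RADEON_VM_PAGE_SNOOPED is ignored for non-system (VRAM) mappings.
 */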
584
585 /**
586 * radeon_vm_update_page_directory - make sure that the page directory is valid
587 *
588 * @rdev: radeon_device pointer
589 * @vm: requested vm
590 *
591 * Updates the page directory to point to the allocated page tables (cayman+).
595 * Returns 0 for success, error for failure.
596 *
597 * Global and local mutex must be locked!
598 */
599 int radeon_vm_update_page_directory(struct radeon_device *rdev,
600 struct radeon_vm *vm)
601 {
602 struct radeon_bo *pd = vm->page_directory;
603 uint64_t pd_addr = radeon_bo_gpu_offset(pd);
604 uint32_t incr = RADEON_VM_PTE_COUNT * 8;
605 uint64_t last_pde = ~0, last_pt = ~0;
606 unsigned count = 0, pt_idx, ndw;
607 struct radeon_ib ib;
608 int r;
609
610 /* padding, etc. */
611 ndw = 64;
612
613 /* assume the worst case */
614 ndw += vm->max_pde_used * 16;
615
616 /* update too big for an IB */
617 if (ndw > 0xfffff)
618 return -ENOMEM;
619
620 r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
621 if (r)
622 return r;
623 ib.length_dw = 0;
624
625 /* walk over the address space and update the page directory */
626 for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
627 struct radeon_bo *bo = vm->page_tables[pt_idx].bo;
628 uint64_t pde, pt;
629
630 if (bo == NULL)
631 continue;
632
633 pt = radeon_bo_gpu_offset(bo);
634 if (vm->page_tables[pt_idx].addr == pt)
635 continue;
636 vm->page_tables[pt_idx].addr = pt;
637
638 pde = pd_addr + pt_idx * 8;
639 if (((last_pde + 8 * count) != pde) ||
640 ((last_pt + incr * count) != pt)) {
641
642 if (count) {
643 radeon_asic_vm_set_page(rdev, &ib, last_pde,
644 last_pt, count, incr,
645 R600_PTE_VALID);
646 }
647
648 count = 1;
649 last_pde = pde;
650 last_pt = pt;
651 } else {
652 ++count;
653 }
654 }
655
656 if (count)
657 radeon_asic_vm_set_page(rdev, &ib, last_pde, last_pt, count,
658 incr, R600_PTE_VALID);
659
660 if (ib.length_dw != 0) {
661 radeon_semaphore_sync_to(ib.semaphore, pd->tbo.sync_obj);
662 radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use);
663 r = radeon_ib_schedule(rdev, &ib, NULL);
664 if (r) {
665 radeon_ib_free(rdev, &ib);
666 return r;
667 }
668 radeon_fence_unref(&vm->fence);
669 vm->fence = radeon_fence_ref(ib.fence);
670 radeon_fence_unref(&vm->last_flush);
671 }
672 radeon_ib_free(rdev, &ib);
673
674 return 0;
675 }
676
677 /**
678 * radeon_vm_frag_ptes - add fragment information to PTEs
679 *
680 * @rdev: radeon_device pointer
681 * @ib: IB for the update
682 * @pe_start: first PTE to handle
683 * @pe_end: last PTE to handle
684 * @addr: addr those PTEs should point to
685 * @flags: hw mapping flags
686 *
687 * Global and local mutex must be locked!
688 */
689 static void radeon_vm_frag_ptes(struct radeon_device *rdev,
690 struct radeon_ib *ib,
691 uint64_t pe_start, uint64_t pe_end,
692 uint64_t addr, uint32_t flags)
693 {
694 /**
695 * The MC L1 TLB supports variable sized pages, based on a fragment
696 * field in the PTE. When this field is set to a non-zero value, page
697 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
698 * flags are considered valid for all PTEs within the fragment range
699 * and corresponding mappings are assumed to be physically contiguous.
700 *
701 * The L1 TLB can store a single PTE for the whole fragment,
702 * significantly increasing the space available for translation
703 * caching. This leads to large improvements in throughput when the
704 * TLB is under pressure.
705 *
706 * The L2 TLB distributes small and large fragments into two
707 * asymmetric partitions. The large fragment cache is significantly
708 * larger. Thus, we try to use large fragments wherever possible.
709 * Userspace can support this by aligning virtual base address and
710 * allocation size to the fragment size.
711 */
712
713 /* NI is optimized for 256KB fragments, SI and newer for 64KB */
714 uint64_t frag_flags = rdev->family == CHIP_CAYMAN ?
715 R600_PTE_FRAG_256KB : R600_PTE_FRAG_64KB;
716 uint64_t frag_align = rdev->family == CHIP_CAYMAN ? 0x200 : 0x80;
717
718 uint64_t frag_start = ALIGN(pe_start, frag_align);
719 uint64_t frag_end = pe_end & ~(frag_align - 1);
720
721 unsigned count;
722
723 /* system pages are not necessarily physically contiguous */
724 if ((flags & R600_PTE_SYSTEM) || !(flags & R600_PTE_VALID) ||
725 (frag_start >= frag_end)) {
726
727 count = (pe_end - pe_start) / 8;
728 radeon_asic_vm_set_page(rdev, ib, pe_start, addr, count,
729 RADEON_GPU_PAGE_SIZE, flags);
730 return;
731 }
732
733 /* handle the 4K area at the beginning */
734 if (pe_start != frag_start) {
735 count = (frag_start - pe_start) / 8;
736 radeon_asic_vm_set_page(rdev, ib, pe_start, addr, count,
737 RADEON_GPU_PAGE_SIZE, flags);
738 addr += RADEON_GPU_PAGE_SIZE * count;
739 }
740
741 /* handle the area in the middle */
742 count = (frag_end - frag_start) / 8;
743 radeon_asic_vm_set_page(rdev, ib, frag_start, addr, count,
744 RADEON_GPU_PAGE_SIZE, flags | frag_flags);
745
746 /* handle the 4K area at the end */
747 if (frag_end != pe_end) {
748 addr += RADEON_GPU_PAGE_SIZE * count;
749 count = (pe_end - frag_end) / 8;
750 radeon_asic_vm_set_page(rdev, ib, frag_end, addr, count,
751 RADEON_GPU_PAGE_SIZE, flags);
752 }
753 }
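
/*
 * Worked example (illustrative): on SI, frag_align is 0x80 bytes of PTEs,
 * i.e. 16 entries or 64KB of address space.  A request covering the PTE
 * range 0x90..0x210 is split into a 4KB-granularity head (0x90..0x100),
 * a fragment body (0x100..0x200) written with R600_PTE_FRAG_64KB, and a
 * 4KB-granularity tail (0x200..0x210).
 */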
754
755 /**
756 * radeon_vm_update_ptes - make sure that page tables are valid
757 *
758 * @rdev: radeon_device pointer
759 * @vm: requested vm
 * @ib: indirect buffer to fill with commands
760 * @start: start of GPU address range
761 * @end: end of GPU address range
762 * @dst: destination address to map to
763 * @flags: mapping flags
764 *
765 * Update the page tables in the range @start - @end (cayman+).
766 *
767 * Global and local mutex must be locked!
768 */
769 static void radeon_vm_update_ptes(struct radeon_device *rdev,
770 struct radeon_vm *vm,
771 struct radeon_ib *ib,
772 uint64_t start, uint64_t end,
773 uint64_t dst, uint32_t flags)
774 {
775 uint64_t mask = RADEON_VM_PTE_COUNT - 1;
776 uint64_t last_pte = ~0, last_dst = ~0;
777 unsigned count = 0;
778 uint64_t addr;
779
780 start = start / RADEON_GPU_PAGE_SIZE;
781 end = end / RADEON_GPU_PAGE_SIZE;
782
783 /* walk over the address space and update the page tables */
784 for (addr = start; addr < end; ) {
785 uint64_t pt_idx = addr >> radeon_vm_block_size;
786 struct radeon_bo *pt = vm->page_tables[pt_idx].bo;
787 unsigned nptes;
788 uint64_t pte;
789
790 radeon_semaphore_sync_to(ib->semaphore, pt->tbo.sync_obj);
791
792 if ((addr & ~mask) == (end & ~mask))
793 nptes = end - addr;
794 else
795 nptes = RADEON_VM_PTE_COUNT - (addr & mask);
796
797 pte = radeon_bo_gpu_offset(pt);
798 pte += (addr & mask) * 8;
799
800 if ((last_pte + 8 * count) != pte) {
801
802 if (count) {
803 radeon_vm_frag_ptes(rdev, ib, last_pte,
804 last_pte + 8 * count,
805 last_dst, flags);
806 }
807
808 count = nptes;
809 last_pte = pte;
810 last_dst = dst;
811 } else {
812 count += nptes;
813 }
814
815 addr += nptes;
816 dst += nptes * RADEON_GPU_PAGE_SIZE;
817 }
818
819 if (count) {
820 radeon_vm_frag_ptes(rdev, ib, last_pte,
821 last_pte + 8 * count,
822 last_dst, flags);
823 }
824 }
825
826 /**
827 * radeon_vm_bo_update - map a bo into the vm page table
828 *
829 * @rdev: radeon_device pointer
830 * @bo_va: bo_va to update the mapping for
831 * @mem: ttm mem
833 *
834 * Fill in the page table entries for @bo (cayman+).
835 * Returns 0 for success, -EINVAL for failure.
836 *
837 * Object has to be reserved and the mutex must be locked!
838 */
839 int radeon_vm_bo_update(struct radeon_device *rdev,
840 struct radeon_bo_va *bo_va,
841 struct ttm_mem_reg *mem)
842 {
843 struct radeon_vm *vm = bo_va->vm;
844 struct radeon_ib ib;
845 unsigned nptes, ndw;
846 uint64_t addr;
847 int r;
848
849
850 if (!bo_va->soffset) {
851 dev_err(rdev->dev, "bo %p doesn't have a mapping in vm %p\n",
852 bo_va->bo, vm);
853 return -EINVAL;
854 }
855
856 if ((bo_va->valid && mem) || (!bo_va->valid && mem == NULL))
857 return 0;
858
859 bo_va->flags &= ~RADEON_VM_PAGE_VALID;
860 bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
861 bo_va->flags &= ~RADEON_VM_PAGE_SNOOPED;
862 if (mem) {
863 addr = mem->start << PAGE_SHIFT;
864 if (mem->mem_type != TTM_PL_SYSTEM) {
865 bo_va->flags |= RADEON_VM_PAGE_VALID;
866 bo_va->valid = true;
867 }
868 if (mem->mem_type == TTM_PL_TT) {
869 bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
870 if (!(bo_va->bo->flags & (RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC)))
871 bo_va->flags |= RADEON_VM_PAGE_SNOOPED;
872
873 } else {
874 addr += rdev->vm_manager.vram_base_offset;
875 }
876 } else {
877 addr = 0;
878 bo_va->valid = false;
879 }
880
881 trace_radeon_vm_bo_update(bo_va);
882
883 nptes = (bo_va->eoffset - bo_va->soffset) / RADEON_GPU_PAGE_SIZE;
884
885 /* padding, etc. */
886 ndw = 64;
887
888 if (radeon_vm_block_size > 11)
889 /* reserve space for one header for every 2k dwords */
890 ndw += (nptes >> 11) * 4;
891 else
892 /* reserve space for one header for
893 every (1 << BLOCK_SIZE) entries */
894 ndw += (nptes >> radeon_vm_block_size) * 4;
895
896 /* reserve space for pte addresses */
897 ndw += nptes * 2;
898
899 /* update too big for an IB */
900 if (ndw > 0xfffff)
901 return -ENOMEM;
902
903 r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
904 if (r)
905 return r;
906 ib.length_dw = 0;
907
908 radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset,
909 addr, radeon_vm_page_flags(bo_va->flags));
910
911 radeon_semaphore_sync_to(ib.semaphore, vm->fence);
912 r = radeon_ib_schedule(rdev, &ib, NULL);
913 if (r) {
914 radeon_ib_free(rdev, &ib);
915 return r;
916 }
917 radeon_fence_unref(&vm->fence);
918 vm->fence = radeon_fence_ref(ib.fence);
919 radeon_ib_free(rdev, &ib);
920 radeon_fence_unref(&vm->last_flush);
921
922 return 0;
923 }
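
/*
 * Sizing example (illustrative, assuming a block size of 9): mapping a 2MB
 * BO means nptes = 512, so ndw = 64 + (512 >> 9) * 4 + 512 * 2 = 1092 dwords,
 * comfortably below the 0xfffff IB limit checked above.
 */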
924
925 /**
926 * radeon_vm_clear_freed - clear freed BOs in the PT
927 *
928 * @rdev: radeon_device pointer
929 * @vm: requested vm
930 *
931 * Make sure all freed BOs are cleared in the PT.
932 * Returns 0 for success.
933 *
934 * PTs have to be reserved and mutex must be locked!
935 */
936 int radeon_vm_clear_freed(struct radeon_device *rdev,
937 struct radeon_vm *vm)
938 {
939 struct radeon_bo_va *bo_va, *tmp;
940 int r;
941
942 list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
943 list_del(&bo_va->vm_status);
944 r = radeon_vm_bo_update(rdev, bo_va, NULL);
945 kfree(bo_va);
946 if (r)
947 return r;
948 }
949 return 0;
951 }
952
953 /**
954 * radeon_vm_bo_rmv - remove a bo from a specific vm
955 *
956 * @rdev: radeon_device pointer
957 * @bo_va: requested bo_va
958 *
959 * Remove @bo_va->bo from the requested vm (cayman+).
960 *
961 * Object has to be reserved!
962 */
963 void radeon_vm_bo_rmv(struct radeon_device *rdev,
964 struct radeon_bo_va *bo_va)
965 {
966 struct radeon_vm *vm = bo_va->vm;
967
968 list_del(&bo_va->bo_list);
969
970 mutex_lock(&vm->mutex);
971 list_del(&bo_va->vm_list);
972
973 if (bo_va->soffset) {
974 bo_va->bo = NULL;
975 list_add(&bo_va->vm_status, &vm->freed);
976 } else {
977 kfree(bo_va);
978 }
979
980 mutex_unlock(&vm->mutex);
981 }
982
983 /**
984 * radeon_vm_bo_invalidate - mark the bo as invalid
985 *
986 * @rdev: radeon_device pointer
987 * @bo: radeon buffer object
989 *
990 * Mark @bo as invalid (cayman+).
991 */
992 void radeon_vm_bo_invalidate(struct radeon_device *rdev,
993 struct radeon_bo *bo)
994 {
995 struct radeon_bo_va *bo_va;
996
997 list_for_each_entry(bo_va, &bo->va, bo_list) {
998 bo_va->valid = false;
999 }
1000 }
1001
1002 /**
1003 * radeon_vm_init - initialize a vm instance
1004 *
1005 * @rdev: radeon_device pointer
1006 * @vm: requested vm
1007 *
1008 * Init @vm fields (cayman+).
1009 */
1010 int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
1011 {
1012 const unsigned align = min(RADEON_VM_PTB_ALIGN_SIZE,
1013 RADEON_VM_PTE_COUNT * 8);
1014 unsigned pd_size, pd_entries, pts_size;
1015 int r;
1016
1017 vm->id = 0;
1018 vm->ib_bo_va = NULL;
1019 vm->fence = NULL;
1020 vm->last_flush = NULL;
1021 vm->last_id_use = NULL;
1022 mutex_init(&vm->mutex);
1023 INIT_LIST_HEAD(&vm->va);
1024 INIT_LIST_HEAD(&vm->freed);
1025
1026 pd_size = radeon_vm_directory_size(rdev);
1027 pd_entries = radeon_vm_num_pdes(rdev);
1028
1029 /* allocate page table array */
1030 pts_size = pd_entries * sizeof(struct radeon_vm_pt);
1031 vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
1032 if (vm->page_tables == NULL) {
1033 DRM_ERROR("Cannot allocate memory for page table array\n");
1034 return -ENOMEM;
1035 }
1036
1037 r = radeon_bo_create(rdev, pd_size, align, true,
1038 RADEON_GEM_DOMAIN_VRAM, 0, NULL,
1039 &vm->page_directory);
1040 if (r)
1041 return r;
1042
1043 r = radeon_vm_clear_bo(rdev, vm->page_directory);
1044 if (r) {
1045 radeon_bo_unref(&vm->page_directory);
1046 vm->page_directory = NULL;
1047 return r;
1048 }
1049
1050 return 0;
1051 }
1052
1053 /**
1054 * radeon_vm_fini - tear down a vm instance
1055 *
1056 * @rdev: radeon_device pointer
1057 * @vm: requested vm
1058 *
1059 * Tear down @vm (cayman+).
1060 * Unbind the VM and remove all bos from the vm bo list
1061 */
1062 void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
1063 {
1064 struct radeon_bo_va *bo_va, *tmp;
1065 int i, r;
1066
1067 if (!list_empty(&vm->va)) {
1068 dev_err(rdev->dev, "still active bo inside vm\n");
1069 }
1070 list_for_each_entry_safe(bo_va, tmp, &vm->va, vm_list) {
1071 list_del_init(&bo_va->vm_list);
1072 r = radeon_bo_reserve(bo_va->bo, false);
1073 if (!r) {
1074 list_del_init(&bo_va->bo_list);
1075 radeon_bo_unreserve(bo_va->bo);
1076 kfree(bo_va);
1077 }
1078 }
1079 list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status)
1080 kfree(bo_va);
1081
1082 for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
1083 radeon_bo_unref(&vm->page_tables[i].bo);
1084 kfree(vm->page_tables);
1085
1086 radeon_bo_unref(&vm->page_directory);
1087
1088 radeon_fence_unref(&vm->fence);
1089 radeon_fence_unref(&vm->last_flush);
1090 radeon_fence_unref(&vm->last_id_use);
1091
1092 mutex_destroy(&vm->mutex);
1093 }