drivers/gpu/drm/radeon/radeon_vm.c
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_trace.h"

/*
 * GPUVM
 * GPUVM is similar to the legacy GART on older asics, however
 * rather than there being a single global GART table
 * for the entire GPU, there are multiple VM page tables active
 * at any given time. The VM page tables can contain a mix of
 * VRAM pages and system memory pages, and the system memory pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID. When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer. VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time;
 * SI supports 16.
 */
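
/*
 * A rough sketch of the typical per-VM lifecycle as driven by the
 * functions in this file (illustrative only; error handling, locking
 * and the CS ioctl plumbing are omitted):
 *
 *   struct radeon_vm vm;
 *   struct radeon_bo_va *bo_va;
 *
 *   radeon_vm_manager_init(rdev);                  // once at driver init
 *   radeon_vm_init(rdev, &vm);                     // per userspace client
 *   bo_va = radeon_vm_bo_add(rdev, &vm, bo);       // track a BO in this VM
 *   radeon_vm_bo_set_addr(rdev, bo_va, va, flags); // pick a VA range
 *   radeon_vm_bo_update(rdev, &vm, bo, mem);       // write the PTEs
 *   ...                                            // command submission uses
 *                                                  // grab_id/flush/fence
 *   radeon_vm_bo_rmv(rdev, bo_va);                 // unmap and drop the BO
 *   radeon_vm_fini(rdev, &vm);                     // per client teardown
 */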

/**
 * radeon_vm_num_pdes - return the number of page directory entries
 *
 * @rdev: radeon_device pointer
 *
 * Calculate the number of page directory entries (cayman+).
 */
static unsigned radeon_vm_num_pdes(struct radeon_device *rdev)
{
        return rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE;
}

/**
 * radeon_vm_directory_size - returns the size of the page directory in bytes
 *
 * @rdev: radeon_device pointer
 *
 * Calculate the size of the page directory in bytes (cayman+).
 */
static unsigned radeon_vm_directory_size(struct radeon_device *rdev)
{
        return RADEON_GPU_PAGE_ALIGN(radeon_vm_num_pdes(rdev) * 8);
}
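
/*
 * Worked example of the two helpers above (illustrative; the concrete
 * numbers depend on the asic setup and assume RADEON_VM_BLOCK_SIZE is 9,
 * i.e. 512 PTEs per page table, with a 1 GiB VM space of 4 KiB GPU pages):
 *
 *   max_pfn  = 1 GiB / 4 KiB              = 262144 pages
 *   num_pdes = 262144 >> 9                = 512 directory entries
 *   dir size = ALIGN(512 * 8 bytes)       = 4096 bytes (one GPU page)
 *
 * Only the shift and the 8-byte PDE size are fixed by the code above.
 */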

/**
 * radeon_vm_manager_init - init the vm manager
 *
 * @rdev: radeon_device pointer
 *
 * Init the vm manager (cayman+).
 * Returns 0 for success, error for failure.
 */
int radeon_vm_manager_init(struct radeon_device *rdev)
{
        int r;

        if (!rdev->vm_manager.enabled) {
                r = radeon_asic_vm_init(rdev);
                if (r)
                        return r;

                rdev->vm_manager.enabled = true;
        }
        return 0;
}

/**
 * radeon_vm_manager_fini - tear down the vm manager
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the VM manager (cayman+).
 */
void radeon_vm_manager_fini(struct radeon_device *rdev)
{
        int i;

        if (!rdev->vm_manager.enabled)
                return;

        for (i = 0; i < RADEON_NUM_VM; ++i)
                radeon_fence_unref(&rdev->vm_manager.active[i]);
        radeon_asic_vm_fini(rdev);
        rdev->vm_manager.enabled = false;
}

/**
 * radeon_vm_get_bos - add the vm BOs to a validation list
 *
 * @rdev: radeon_device pointer
 * @vm: vm providing the BOs
 * @head: head of validation list
 *
 * Add the page directory to the list of BOs to
 * validate for command submission (cayman+).
 */
struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
                                         struct radeon_vm *vm,
                                         struct list_head *head)
{
        struct radeon_bo_list *list;
        unsigned i, idx, size;

        size = (radeon_vm_num_pdes(rdev) + 1) * sizeof(struct radeon_bo_list);
        list = kmalloc(size, GFP_KERNEL);
        if (!list)
                return NULL;

        /* add the vm page table to the list */
        list[0].bo = vm->page_directory;
        list[0].domain = RADEON_GEM_DOMAIN_VRAM;
        list[0].alt_domain = RADEON_GEM_DOMAIN_VRAM;
        list[0].tv.bo = &vm->page_directory->tbo;
        list_add(&list[0].tv.head, head);

        for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {
                if (!vm->page_tables[i].bo)
                        continue;

                list[idx].bo = vm->page_tables[i].bo;
                list[idx].domain = RADEON_GEM_DOMAIN_VRAM;
                list[idx].alt_domain = RADEON_GEM_DOMAIN_VRAM;
                list[idx].tv.bo = &list[idx].bo->tbo;
                list_add(&list[idx++].tv.head, head);
        }

        return list;
}

/**
 * radeon_vm_grab_id - allocate the next free VMID
 *
 * @rdev: radeon_device pointer
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 *
 * Allocate an id for the vm (cayman+).
 * Returns the fence we need to sync to (if any).
 *
 * Global and local mutex must be locked!
 */
struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
                                       struct radeon_vm *vm, int ring)
{
        struct radeon_fence *best[RADEON_NUM_RINGS] = {};
        unsigned choices[2] = {};
        unsigned i;

        /* check if the id is still valid */
        if (vm->last_id_use && vm->last_id_use == rdev->vm_manager.active[vm->id])
                return NULL;

        /* we definitely need to flush */
        radeon_fence_unref(&vm->last_flush);

        /* skip over VMID 0, since it is the system VM */
        for (i = 1; i < rdev->vm_manager.nvm; ++i) {
                struct radeon_fence *fence = rdev->vm_manager.active[i];

                if (fence == NULL) {
                        /* found a free one */
                        vm->id = i;
                        trace_radeon_vm_grab_id(vm->id, ring);
                        return NULL;
                }

                if (radeon_fence_is_earlier(fence, best[fence->ring])) {
                        best[fence->ring] = fence;
                        choices[fence->ring == ring ? 0 : 1] = i;
                }
        }

        for (i = 0; i < 2; ++i) {
                if (choices[i]) {
                        vm->id = choices[i];
                        trace_radeon_vm_grab_id(vm->id, ring);
                        return rdev->vm_manager.active[choices[i]];
                }
        }

        /* should never happen */
        BUG();
        return NULL;
}

/**
 * radeon_vm_flush - hardware flush the vm
 *
 * @rdev: radeon_device pointer
 * @vm: vm we want to flush
 * @ring: ring to use for flush
 *
 * Flush the vm (cayman+).
 *
 * Global and local mutex must be locked!
 */
void radeon_vm_flush(struct radeon_device *rdev,
                     struct radeon_vm *vm,
                     int ring)
{
        uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory);

        /* if we can't remember our last VM flush then flush now! */
        /* XXX figure out why we have to flush all the time */
        if (!vm->last_flush || true || pd_addr != vm->pd_gpu_addr) {
                vm->pd_gpu_addr = pd_addr;
                radeon_ring_vm_flush(rdev, ring, vm);
        }
}

/**
 * radeon_vm_fence - remember fence for vm
 *
 * @rdev: radeon_device pointer
 * @vm: vm we want to fence
 * @fence: fence to remember
 *
 * Fence the vm (cayman+).
 * Set the fence used to protect page table and id.
 *
 * Global and local mutex must be locked!
 */
void radeon_vm_fence(struct radeon_device *rdev,
                     struct radeon_vm *vm,
                     struct radeon_fence *fence)
{
        radeon_fence_unref(&vm->fence);
        vm->fence = radeon_fence_ref(fence);

        radeon_fence_unref(&rdev->vm_manager.active[vm->id]);
        rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence);

        radeon_fence_unref(&vm->last_id_use);
        vm->last_id_use = radeon_fence_ref(fence);

        /* we just flushed the VM, remember that */
        if (!vm->last_flush)
                vm->last_flush = radeon_fence_ref(fence);
}

/**
 * radeon_vm_bo_find - find the bo_va for a specific vm & bo
 *
 * @vm: requested vm
 * @bo: requested buffer object
 *
 * Find @bo inside the requested vm (cayman+).
 * Search inside the @bo's vm list for the requested vm.
 * Returns the found bo_va or NULL if none is found.
 *
 * Object has to be reserved!
 */
struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
                                       struct radeon_bo *bo)
{
        struct radeon_bo_va *bo_va;

        list_for_each_entry(bo_va, &bo->va, bo_list) {
                if (bo_va->vm == vm) {
                        return bo_va;
                }
        }
        return NULL;
}

/**
 * radeon_vm_bo_add - add a bo to a specific vm
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 * @bo: radeon buffer object
 *
 * Add @bo into the requested vm (cayman+).
 * Add @bo to the list of bos associated with the vm.
 * Returns newly added bo_va or NULL for failure.
 *
 * Object has to be reserved!
 */
struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
                                      struct radeon_vm *vm,
                                      struct radeon_bo *bo)
{
        struct radeon_bo_va *bo_va;

        bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
        if (bo_va == NULL) {
                return NULL;
        }
        bo_va->vm = vm;
        bo_va->bo = bo;
        bo_va->soffset = 0;
        bo_va->eoffset = 0;
        bo_va->flags = 0;
        bo_va->valid = false;
        bo_va->ref_count = 1;
        INIT_LIST_HEAD(&bo_va->bo_list);
        INIT_LIST_HEAD(&bo_va->vm_list);

        mutex_lock(&vm->mutex);
        list_add(&bo_va->vm_list, &vm->va);
        list_add_tail(&bo_va->bo_list, &bo->va);
        mutex_unlock(&vm->mutex);

        return bo_va;
}

/**
 * radeon_vm_clear_bo - initially clear the page dir/table
 *
 * @rdev: radeon_device pointer
 * @bo: bo to clear
 *
 * Returns 0 for success, error for failure.
 */
static int radeon_vm_clear_bo(struct radeon_device *rdev,
                              struct radeon_bo *bo)
{
        struct ttm_validate_buffer tv;
        struct ww_acquire_ctx ticket;
        struct list_head head;
        struct radeon_ib ib;
        unsigned entries;
        uint64_t addr;
        int r;

        memset(&tv, 0, sizeof(tv));
        tv.bo = &bo->tbo;

        INIT_LIST_HEAD(&head);
        list_add(&tv.head, &head);

        r = ttm_eu_reserve_buffers(&ticket, &head);
        if (r)
                return r;

        r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
        if (r)
                goto error;

        addr = radeon_bo_gpu_offset(bo);
        entries = radeon_bo_size(bo) / 8;

        r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib,
                          NULL, entries * 2 + 64);
        if (r)
                goto error;

        ib.length_dw = 0;

        radeon_asic_vm_set_page(rdev, &ib, addr, 0, entries, 0, 0);

        r = radeon_ib_schedule(rdev, &ib, NULL);
        if (r)
                goto error;

        ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence);
        radeon_ib_free(rdev, &ib);

        return 0;

error:
        ttm_eu_backoff_reservation(&ticket, &head);
        return r;
}

/**
 * radeon_vm_bo_set_addr - set bo's virtual address inside a vm
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to store the address
 * @soffset: requested offset of the buffer in the VM address space
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Set offset of @bo_va (cayman+).
 * Validate and set the offset requested within the vm address space.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved!
 */
int radeon_vm_bo_set_addr(struct radeon_device *rdev,
                          struct radeon_bo_va *bo_va,
                          uint64_t soffset,
                          uint32_t flags)
{
        uint64_t size = radeon_bo_size(bo_va->bo);
        uint64_t eoffset, last_offset = 0;
        struct radeon_vm *vm = bo_va->vm;
        struct radeon_bo_va *tmp;
        struct list_head *head;
        unsigned last_pfn, pt_idx;
        int r;

        if (soffset) {
                /* make sure the object fits at this offset */
                eoffset = soffset + size;
                if (soffset >= eoffset) {
                        return -EINVAL;
                }

                last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
                if (last_pfn > rdev->vm_manager.max_pfn) {
                        dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
                                last_pfn, rdev->vm_manager.max_pfn);
                        return -EINVAL;
                }

        } else {
                eoffset = last_pfn = 0;
        }

        mutex_lock(&vm->mutex);
        head = &vm->va;
        last_offset = 0;
        list_for_each_entry(tmp, &vm->va, vm_list) {
                if (bo_va == tmp) {
                        /* skip over currently modified bo */
                        continue;
                }

                if (soffset >= last_offset && eoffset <= tmp->soffset) {
                        /* bo can be added before this one */
                        break;
                }
                if (eoffset > tmp->soffset && soffset < tmp->eoffset) {
                        /* bo and tmp overlap, invalid offset */
                        dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n",
                                bo_va->bo, (unsigned)bo_va->soffset, tmp->bo,
                                (unsigned)tmp->soffset, (unsigned)tmp->eoffset);
                        mutex_unlock(&vm->mutex);
                        return -EINVAL;
                }
                last_offset = tmp->eoffset;
                head = &tmp->vm_list;
        }

        bo_va->soffset = soffset;
        bo_va->eoffset = eoffset;
        bo_va->flags = flags;
        bo_va->valid = false;
        list_move(&bo_va->vm_list, head);

        soffset = (soffset / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
        eoffset = (eoffset / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;

        if (eoffset > vm->max_pde_used)
                vm->max_pde_used = eoffset;

        radeon_bo_unreserve(bo_va->bo);

        /* walk over the address space and allocate the page tables */
        for (pt_idx = soffset; pt_idx <= eoffset; ++pt_idx) {
                struct radeon_bo *pt;

                if (vm->page_tables[pt_idx].bo)
                        continue;

                /* drop mutex to allocate and clear page table */
                mutex_unlock(&vm->mutex);

                r = radeon_bo_create(rdev, RADEON_VM_PTE_COUNT * 8,
                                     RADEON_GPU_PAGE_SIZE, false,
                                     RADEON_GEM_DOMAIN_VRAM, NULL, &pt);
                if (r)
                        return r;

                r = radeon_vm_clear_bo(rdev, pt);
                if (r) {
                        radeon_bo_unref(&pt);
                        radeon_bo_reserve(bo_va->bo, false);
                        return r;
                }

                /* acquire mutex again */
                mutex_lock(&vm->mutex);
                if (vm->page_tables[pt_idx].bo) {
                        /* someone else allocated the pt in the meantime */
                        mutex_unlock(&vm->mutex);
                        radeon_bo_unref(&pt);
                        mutex_lock(&vm->mutex);
                        continue;
                }

                vm->page_tables[pt_idx].addr = 0;
                vm->page_tables[pt_idx].bo = pt;
        }

        mutex_unlock(&vm->mutex);
        return radeon_bo_reserve(bo_va->bo, false);
}
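
/*
 * Illustration of the page table index math above (assuming
 * RADEON_VM_BLOCK_SIZE is 9 and 4 KiB GPU pages): a 64 KiB buffer
 * placed at VA 0x200000 spans GPU pages 512..527, so both
 * (0x200000 >> 12) >> 9 and (0x210000 >> 12) >> 9 equal 1, and only
 * page table 1 needs to exist for this mapping.
 */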

/**
 * radeon_vm_map_gart - get the physical address of a gart page
 *
 * @rdev: radeon_device pointer
 * @addr: the unmapped addr
 *
 * Look up the physical address of the page that the pte resolves
 * to (cayman+).
 * Returns the physical address of the page.
 */
uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
{
        uint64_t result;

        /* page table offset */
        result = rdev->gart.pages_addr[addr >> PAGE_SHIFT];

        /* in case cpu page size != gpu page size */
        result |= addr & (~PAGE_MASK);

        return result;
}
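
/*
 * For example (assuming 4 KiB CPU pages, so PAGE_SHIFT == 12): for
 * addr == 0x5234 the page index is 0x5234 >> 12 == 5, the in-page
 * offset 0x5234 & ~PAGE_MASK == 0x234 is OR'ed back in, and the
 * result is pages_addr[5] | 0x234.
 */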

/**
 * radeon_vm_page_flags - translate page flags to what the hw uses
 *
 * @flags: flags coming from userspace
 *
 * Translate the flags the userspace ABI uses to hw flags.
 */
static uint32_t radeon_vm_page_flags(uint32_t flags)
{
        uint32_t hw_flags = 0;

        hw_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
        hw_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
        hw_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
        if (flags & RADEON_VM_PAGE_SYSTEM) {
                hw_flags |= R600_PTE_SYSTEM;
                hw_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
        }
        return hw_flags;
}
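
/*
 * E.g. a snooped system page that is readable and writeable translates
 * one-for-one:
 *
 *   radeon_vm_page_flags(RADEON_VM_PAGE_VALID | RADEON_VM_PAGE_READABLE |
 *                        RADEON_VM_PAGE_WRITEABLE | RADEON_VM_PAGE_SYSTEM |
 *                        RADEON_VM_PAGE_SNOOPED)
 *     == R600_PTE_VALID | R600_PTE_READABLE | R600_PTE_WRITEABLE |
 *        R600_PTE_SYSTEM | R600_PTE_SNOOPED
 */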

/**
 * radeon_vm_update_page_directory - make sure that page directory is valid
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Allocates new page tables if necessary
 * and updates the page directory (cayman+).
 * Returns 0 for success, error for failure.
 *
 * Global and local mutex must be locked!
 */
int radeon_vm_update_page_directory(struct radeon_device *rdev,
                                    struct radeon_vm *vm)
{
        static const uint32_t incr = RADEON_VM_PTE_COUNT * 8;

        uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory);
        uint64_t last_pde = ~0, last_pt = ~0;
        unsigned count = 0, pt_idx, ndw;
        struct radeon_ib ib;
        int r;

        /* padding, etc. */
        ndw = 64;

        /* assume the worst case */
        ndw += vm->max_pde_used * 12;

        /* update too big for an IB */
        if (ndw > 0xfffff)
                return -ENOMEM;

        r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
        if (r)
                return r;
        ib.length_dw = 0;

        /* walk over the address space and update the page directory */
        for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
                struct radeon_bo *bo = vm->page_tables[pt_idx].bo;
                uint64_t pde, pt;

                if (bo == NULL)
                        continue;

                pt = radeon_bo_gpu_offset(bo);
                if (vm->page_tables[pt_idx].addr == pt)
                        continue;
                vm->page_tables[pt_idx].addr = pt;

                pde = pd_addr + pt_idx * 8;
                if (((last_pde + 8 * count) != pde) ||
                    ((last_pt + incr * count) != pt)) {

                        if (count) {
                                radeon_asic_vm_set_page(rdev, &ib, last_pde,
                                                        last_pt, count, incr,
                                                        R600_PTE_VALID);
                        }

                        count = 1;
                        last_pde = pde;
                        last_pt = pt;
                } else {
                        ++count;
                }
        }

        if (count)
                radeon_asic_vm_set_page(rdev, &ib, last_pde, last_pt, count,
                                        incr, R600_PTE_VALID);

        if (ib.length_dw != 0) {
                radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use);
                r = radeon_ib_schedule(rdev, &ib, NULL);
                if (r) {
                        radeon_ib_free(rdev, &ib);
                        return r;
                }
                radeon_fence_unref(&vm->fence);
                vm->fence = radeon_fence_ref(ib.fence);
                radeon_fence_unref(&vm->last_flush);
        }
        radeon_ib_free(rdev, &ib);

        return 0;
}

/**
 * radeon_vm_update_ptes - make sure that page tables are valid
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 * @ib: indirect buffer to fill with update commands
 * @start: start of GPU address range
 * @end: end of GPU address range
 * @dst: destination address to map to
 * @flags: mapping flags
 *
 * Update the page tables in the range @start - @end (cayman+).
 *
 * Global and local mutex must be locked!
 */
static void radeon_vm_update_ptes(struct radeon_device *rdev,
                                  struct radeon_vm *vm,
                                  struct radeon_ib *ib,
                                  uint64_t start, uint64_t end,
                                  uint64_t dst, uint32_t flags)
{
        static const uint64_t mask = RADEON_VM_PTE_COUNT - 1;

        uint64_t last_pte = ~0, last_dst = ~0;
        unsigned count = 0;
        uint64_t addr;

        start = start / RADEON_GPU_PAGE_SIZE;
        end = end / RADEON_GPU_PAGE_SIZE;

        /* walk over the address space and update the page tables */
        for (addr = start; addr < end; ) {
                uint64_t pt_idx = addr >> RADEON_VM_BLOCK_SIZE;
                unsigned nptes;
                uint64_t pte;

                if ((addr & ~mask) == (end & ~mask))
                        nptes = end - addr;
                else
                        nptes = RADEON_VM_PTE_COUNT - (addr & mask);

                pte = radeon_bo_gpu_offset(vm->page_tables[pt_idx].bo);
                pte += (addr & mask) * 8;

                if ((last_pte + 8 * count) != pte) {

                        if (count) {
                                radeon_asic_vm_set_page(rdev, ib, last_pte,
                                                        last_dst, count,
                                                        RADEON_GPU_PAGE_SIZE,
                                                        flags);
                        }

                        count = nptes;
                        last_pte = pte;
                        last_dst = dst;
                } else {
                        count += nptes;
                }

                addr += nptes;
                dst += nptes * RADEON_GPU_PAGE_SIZE;
        }

        if (count) {
                radeon_asic_vm_set_page(rdev, ib, last_pte,
                                        last_dst, count,
                                        RADEON_GPU_PAGE_SIZE, flags);
        }
}
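
/*
 * Sketch of the batching above (assuming RADEON_VM_PTE_COUNT == 512):
 * a mapping covering GPU pages 510..514 crosses a page table boundary,
 * so the walk first handles nptes = 512 - 510 = 2 entries from the
 * first page table, then nptes = 3 entries from the next one.
 * Consecutive PTE locations are merged into a single
 * radeon_asic_vm_set_page() call; a new call is only started when the
 * next PTE address is discontiguous with the current run.
 */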

/**
 * radeon_vm_bo_update - map a bo into the vm page table
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 * @bo: radeon buffer object
 * @mem: ttm mem
 *
 * Fill in the page table entries for @bo (cayman+).
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved and mutex must be locked!
 */
int radeon_vm_bo_update(struct radeon_device *rdev,
                        struct radeon_vm *vm,
                        struct radeon_bo *bo,
                        struct ttm_mem_reg *mem)
{
        struct radeon_ib ib;
        struct radeon_bo_va *bo_va;
        unsigned nptes, ndw;
        uint64_t addr;
        int r;

        bo_va = radeon_vm_bo_find(vm, bo);
        if (bo_va == NULL) {
                dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
                return -EINVAL;
        }

        if (!bo_va->soffset) {
                dev_err(rdev->dev, "bo %p doesn't have a mapping in vm %p\n",
                        bo, vm);
                return -EINVAL;
        }

        if ((bo_va->valid && mem) || (!bo_va->valid && mem == NULL))
                return 0;

        bo_va->flags &= ~RADEON_VM_PAGE_VALID;
        bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
        if (mem) {
                addr = mem->start << PAGE_SHIFT;
                if (mem->mem_type != TTM_PL_SYSTEM) {
                        bo_va->flags |= RADEON_VM_PAGE_VALID;
                        bo_va->valid = true;
                }
                if (mem->mem_type == TTM_PL_TT) {
                        bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
                } else {
                        addr += rdev->vm_manager.vram_base_offset;
                }
        } else {
                addr = 0;
                bo_va->valid = false;
        }

        trace_radeon_vm_bo_update(bo_va);

        nptes = radeon_bo_ngpu_pages(bo);

        /* padding, etc. */
        ndw = 64;

        if (RADEON_VM_BLOCK_SIZE > 11)
                /* reserve space for one header for every 2k dwords */
                ndw += (nptes >> 11) * 4;
        else
                /* reserve space for one header for
                 * every (1 << BLOCK_SIZE) entries */
                ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 4;

        /* reserve space for pte addresses */
        ndw += nptes * 2;

        /* update too big for an IB */
        if (ndw > 0xfffff)
                return -ENOMEM;

        r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
        if (r)
                return r;
        ib.length_dw = 0;

        radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset,
                              addr, radeon_vm_page_flags(bo_va->flags));

        radeon_semaphore_sync_to(ib.semaphore, vm->fence);
        r = radeon_ib_schedule(rdev, &ib, NULL);
        if (r) {
                radeon_ib_free(rdev, &ib);
                return r;
        }
        radeon_fence_unref(&vm->fence);
        vm->fence = radeon_fence_ref(ib.fence);
        radeon_ib_free(rdev, &ib);
        radeon_fence_unref(&vm->last_flush);

        return 0;
}
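
/*
 * Rough sizing example for the IB above (illustrative, assuming
 * RADEON_VM_BLOCK_SIZE is 9, so the else-branch applies, and a 1 MiB
 * buffer with 4 KiB GPU pages, i.e. nptes == 256):
 *
 *   ndw = 64                padding
 *       + (256 >> 9) * 4    = 0 header dwords
 *       + 256 * 2           = 512 dwords of PTE addresses
 *       = 576 dwords, well under the 0xfffff IB limit
 */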

/**
 * radeon_vm_bo_rmv - remove a bo from a specific vm
 *
 * @rdev: radeon_device pointer
 * @bo_va: requested bo_va
 *
 * Remove @bo_va->bo from the requested vm (cayman+).
 * Remove @bo_va->bo from the list of bos associated with the bo_va->vm and
 * remove the ptes for @bo_va in the page table.
 * Returns 0 for success.
 *
 * Object has to be reserved!
 */
int radeon_vm_bo_rmv(struct radeon_device *rdev,
                     struct radeon_bo_va *bo_va)
{
        int r = 0;

        mutex_lock(&bo_va->vm->mutex);
        if (bo_va->soffset)
                r = radeon_vm_bo_update(rdev, bo_va->vm, bo_va->bo, NULL);

        list_del(&bo_va->vm_list);
        mutex_unlock(&bo_va->vm->mutex);
        list_del(&bo_va->bo_list);

        kfree(bo_va);
        return r;
}

/**
 * radeon_vm_bo_invalidate - mark the bo as invalid
 *
 * @rdev: radeon_device pointer
 * @bo: radeon buffer object
 *
 * Mark @bo as invalid (cayman+).
 */
void radeon_vm_bo_invalidate(struct radeon_device *rdev,
                             struct radeon_bo *bo)
{
        struct radeon_bo_va *bo_va;

        list_for_each_entry(bo_va, &bo->va, bo_list) {
                bo_va->valid = false;
        }
}

/**
 * radeon_vm_init - initialize a vm instance
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Init @vm fields (cayman+).
 * Returns 0 for success, error for failure.
 */
int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
{
        unsigned pd_size, pd_entries, pts_size;
        int r;

        vm->id = 0;
        vm->fence = NULL;
        vm->last_flush = NULL;
        vm->last_id_use = NULL;
        mutex_init(&vm->mutex);
        INIT_LIST_HEAD(&vm->va);

        pd_size = radeon_vm_directory_size(rdev);
        pd_entries = radeon_vm_num_pdes(rdev);

        /* allocate page table array */
        pts_size = pd_entries * sizeof(struct radeon_vm_pt);
        vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
        if (vm->page_tables == NULL) {
                DRM_ERROR("Cannot allocate memory for page table array\n");
                return -ENOMEM;
        }

        r = radeon_bo_create(rdev, pd_size, RADEON_VM_PTB_ALIGN_SIZE, false,
                             RADEON_GEM_DOMAIN_VRAM, NULL,
                             &vm->page_directory);
        if (r)
                return r;

        r = radeon_vm_clear_bo(rdev, vm->page_directory);
        if (r) {
                radeon_bo_unref(&vm->page_directory);
                vm->page_directory = NULL;
                return r;
        }

        return 0;
}

/**
 * radeon_vm_fini - tear down a vm instance
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Tear down @vm (cayman+).
 * Unbind the VM and remove all bos from the vm bo list.
 */
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
{
        struct radeon_bo_va *bo_va, *tmp;
        int i, r;

        if (!list_empty(&vm->va)) {
                dev_err(rdev->dev, "still active bo inside vm\n");
        }
        list_for_each_entry_safe(bo_va, tmp, &vm->va, vm_list) {
                list_del_init(&bo_va->vm_list);
                r = radeon_bo_reserve(bo_va->bo, false);
                if (!r) {
                        list_del_init(&bo_va->bo_list);
                        radeon_bo_unreserve(bo_va->bo);
                        kfree(bo_va);
                }
        }

        for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
                radeon_bo_unref(&vm->page_tables[i].bo);
        kfree(vm->page_tables);

        radeon_bo_unref(&vm->page_directory);

        radeon_fence_unref(&vm->fence);
        radeon_fence_unref(&vm->last_flush);
        radeon_fence_unref(&vm->last_id_use);

        mutex_destroy(&vm->mutex);
}