/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_trace.h"
/*
 * GPUVM
 * GPUVM is similar to the legacy gart on older asics, however
 * rather than there being a single global gart table
 * for the entire GPU, there are multiple VM page tables active
 * at any given time.  The VM page tables can contain a mix of
 * vram pages and system memory pages, and the system memory pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID.  When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer.  VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time;
 * SI supports 16.
 */
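
/*
 * Illustrative sketch, not driver code: assuming the usual 9-bit
 * RADEON_VM_BLOCK_SIZE, a GPU virtual address is decomposed roughly
 * like this before it is looked up in the two-level structure:
 *
 *	pfn     = va / RADEON_GPU_PAGE_SIZE;		(page frame number)
 *	pde_idx = pfn >> RADEON_VM_BLOCK_SIZE;		(page directory slot)
 *	pte_idx = pfn & (RADEON_VM_PTE_COUNT - 1);	(page table slot)
 *
 * radeon_vm_bo_set_addr() and radeon_vm_update_ptes() below perform the
 * same shifts and masks when they pick page tables and entries.
 */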
/**
 * radeon_vm_num_pdes - return the number of page directory entries
 *
 * @rdev: radeon_device pointer
 *
 * Calculate the number of page directory entries (cayman+).
 */
static unsigned radeon_vm_num_pdes(struct radeon_device *rdev)
{
	return rdev->vm_manager.max_pfn >> RADEON_VM_BLOCK_SIZE;
}
/**
 * radeon_vm_directory_size - returns the size of the page directory in bytes
 *
 * @rdev: radeon_device pointer
 *
 * Calculate the size of the page directory in bytes (cayman+).
 */
static unsigned radeon_vm_directory_size(struct radeon_device *rdev)
{
	return RADEON_GPU_PAGE_ALIGN(radeon_vm_num_pdes(rdev) * 8);
}
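
/*
 * Worked example (assumed numbers, for illustration only): with
 * max_pfn = 1 << 20 (4GB of 4KB GPU pages) and a 9-bit block size,
 * radeon_vm_num_pdes() returns 1 << 11 = 2048, so the page directory
 * needs 2048 * 8 = 16KB, already a multiple of the GPU page size.
 */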
/**
 * radeon_vm_manager_init - init the vm manager
 *
 * @rdev: radeon_device pointer
 *
 * Init the vm manager (cayman+).
 * Returns 0 for success, error for failure.
 */
int radeon_vm_manager_init(struct radeon_device *rdev)
{
	int r;

	if (!rdev->vm_manager.enabled) {
		r = radeon_asic_vm_init(rdev);
		if (r)
			return r;

		rdev->vm_manager.enabled = true;
	}
	return 0;
}
/**
 * radeon_vm_manager_fini - tear down the vm manager
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the VM manager (cayman+).
 */
void radeon_vm_manager_fini(struct radeon_device *rdev)
{
	int i;

	if (!rdev->vm_manager.enabled)
		return;

	for (i = 0; i < RADEON_NUM_VM; ++i)
		radeon_fence_unref(&rdev->vm_manager.active[i]);
	radeon_asic_vm_fini(rdev);
	rdev->vm_manager.enabled = false;
}
/**
 * radeon_vm_get_bos - add the vm BOs to a validation list
 *
 * @rdev: radeon_device pointer
 * @vm: vm providing the BOs
 * @head: head of validation list
 *
 * Add the page directory to the list of BOs to
 * validate for command submission (cayman+).
 */
struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
					 struct radeon_vm *vm,
					 struct list_head *head)
{
	struct radeon_bo_list *list;
	unsigned i, idx, size;

	size = (radeon_vm_num_pdes(rdev) + 1) * sizeof(struct radeon_bo_list);
	list = kmalloc(size, GFP_KERNEL);
	if (!list)
		return NULL;

	/* add the vm page table to the list */
	list[0].bo = vm->page_directory;
	list[0].domain = RADEON_GEM_DOMAIN_VRAM;
	list[0].alt_domain = RADEON_GEM_DOMAIN_VRAM;
	list[0].tv.bo = &vm->page_directory->tbo;
	list_add(&list[0].tv.head, head);

	for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {
		if (!vm->page_tables[i].bo)
			continue;

		list[idx].bo = vm->page_tables[i].bo;
		list[idx].domain = RADEON_GEM_DOMAIN_VRAM;
		list[idx].alt_domain = RADEON_GEM_DOMAIN_VRAM;
		list[idx].tv.bo = &list[idx].bo->tbo;
		list_add(&list[idx++].tv.head, head);
	}

	return list;
}
/**
 * radeon_vm_grab_id - allocate the next free VMID
 *
 * @rdev: radeon_device pointer
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 *
 * Allocate an id for the vm (cayman+).
 * Returns the fence we need to sync to (if any).
 *
 * Global and local mutex must be locked!
 */
struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
				       struct radeon_vm *vm, int ring)
{
	struct radeon_fence *best[RADEON_NUM_RINGS] = {};
	unsigned choices[2] = {};
	unsigned i;

	/* check if the id is still valid */
	if (vm->last_id_use && vm->last_id_use == rdev->vm_manager.active[vm->id])
		return NULL;

	/* we definitely need to flush */
	radeon_fence_unref(&vm->last_flush);

	/* skip over VMID 0, since it is the system VM */
	for (i = 1; i < rdev->vm_manager.nvm; ++i) {
		struct radeon_fence *fence = rdev->vm_manager.active[i];

		if (fence == NULL) {
			/* found a free one */
			vm->id = i;
			trace_radeon_vm_grab_id(vm->id, ring);
			return NULL;
		}

		if (radeon_fence_is_earlier(fence, best[fence->ring])) {
			best[fence->ring] = fence;
			choices[fence->ring == ring ? 0 : 1] = i;
		}
	}

	for (i = 0; i < 2; ++i) {
		if (choices[i]) {
			vm->id = choices[i];
			trace_radeon_vm_grab_id(vm->id, ring);
			return rdev->vm_manager.active[choices[i]];
		}
	}

	/* should never happen */
	BUG();
	return NULL;
}
/**
 * radeon_vm_flush - hardware flush the vm
 *
 * @rdev: radeon_device pointer
 * @vm: vm we want to flush
 * @ring: ring to use for flush
 *
 * Flush the vm (cayman+).
 *
 * Global and local mutex must be locked!
 */
void radeon_vm_flush(struct radeon_device *rdev,
		     struct radeon_vm *vm,
		     int ring)
{
	uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory);

	/* if we can't remember our last VM flush then flush now! */
	/* XXX figure out why we have to flush all the time */
	if (!vm->last_flush || true || pd_addr != vm->pd_gpu_addr) {
		vm->pd_gpu_addr = pd_addr;
		radeon_ring_vm_flush(rdev, ring, vm);
	}
}
/**
 * radeon_vm_fence - remember fence for vm
 *
 * @rdev: radeon_device pointer
 * @vm: vm we want to fence
 * @fence: fence to remember
 *
 * Fence the vm (cayman+).
 * Set the fence used to protect page table and id.
 *
 * Global and local mutex must be locked!
 */
void radeon_vm_fence(struct radeon_device *rdev,
		     struct radeon_vm *vm,
		     struct radeon_fence *fence)
{
	radeon_fence_unref(&vm->fence);
	vm->fence = radeon_fence_ref(fence);

	radeon_fence_unref(&rdev->vm_manager.active[vm->id]);
	rdev->vm_manager.active[vm->id] = radeon_fence_ref(fence);

	radeon_fence_unref(&vm->last_id_use);
	vm->last_id_use = radeon_fence_ref(fence);

	/* we just flushed the VM, remember that */
	if (!vm->last_flush)
		vm->last_flush = radeon_fence_ref(fence);
}
/**
 * radeon_vm_bo_find - find the bo_va for a specific vm & bo
 *
 * @vm: requested vm
 * @bo: requested buffer object
 *
 * Find @bo inside the requested vm (cayman+).
 * Search inside the @bo's vm list for the requested vm
 * Returns the found bo_va or NULL if none is found
 *
 * Object has to be reserved!
 */
struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
				       struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		if (bo_va->vm == vm) {
			return bo_va;
		}
	}
	return NULL;
}
/**
 * radeon_vm_bo_add - add a bo to a specific vm
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 * @bo: radeon buffer object
 *
 * Add @bo into the requested vm (cayman+).
 * Add @bo to the list of bos associated with the vm
 * Returns newly added bo_va or NULL for failure
 *
 * Object has to be reserved!
 */
struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
				      struct radeon_vm *vm,
				      struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va;

	bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
	if (bo_va == NULL) {
		return NULL;
	}
	bo_va->vm = vm;
	bo_va->bo = bo;
	bo_va->soffset = 0;
	bo_va->eoffset = 0;
	bo_va->flags = 0;
	bo_va->valid = false;
	bo_va->ref_count = 1;
	INIT_LIST_HEAD(&bo_va->bo_list);
	INIT_LIST_HEAD(&bo_va->vm_list);

	mutex_lock(&vm->mutex);
	list_add(&bo_va->vm_list, &vm->va);
	list_add_tail(&bo_va->bo_list, &bo->va);
	mutex_unlock(&vm->mutex);

	return bo_va;
}
/**
 * radeon_vm_clear_bo - initially clear the page dir/table
 *
 * @rdev: radeon_device pointer
 * @bo: bo to clear
 */
static int radeon_vm_clear_bo(struct radeon_device *rdev,
			      struct radeon_bo *bo)
{
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head head;
	struct radeon_ib ib;
	unsigned entries;
	uint64_t addr;
	int r;

	memset(&tv, 0, sizeof(tv));
	tv.bo = &bo->tbo;

	INIT_LIST_HEAD(&head);
	list_add(&tv.head, &head);

	r = ttm_eu_reserve_buffers(&ticket, &head);
	if (r)
		return r;

	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
	if (r)
		goto error;

	addr = radeon_bo_gpu_offset(bo);
	entries = radeon_bo_size(bo) / 8;

	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib,
			  NULL, entries * 2 + 64);
	if (r)
		goto error;

	ib.length_dw = 0;

	radeon_asic_vm_set_page(rdev, &ib, addr, 0, entries, 0, 0);

	r = radeon_ib_schedule(rdev, &ib, NULL);
	if (r)
		goto error;

	ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence);
	radeon_ib_free(rdev, &ib);

	return 0;

error:
	ttm_eu_backoff_reservation(&ticket, &head);
	return r;
}
/**
 * radeon_vm_bo_set_addr - set bo's virtual address inside a vm
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to store the address
 * @soffset: requested offset of the buffer in the VM address space
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Set offset of @bo_va (cayman+).
 * Validate and set the offset requested within the vm address space.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved!
 */
int radeon_vm_bo_set_addr(struct radeon_device *rdev,
			  struct radeon_bo_va *bo_va,
			  uint64_t soffset,
			  uint32_t flags)
{
	uint64_t size = radeon_bo_size(bo_va->bo);
	uint64_t eoffset, last_offset = 0;
	struct radeon_vm *vm = bo_va->vm;
	struct radeon_bo_va *tmp;
	struct list_head *head;
	unsigned last_pfn, pt_idx;
	int r;

	if (soffset) {
		/* make sure object fits at this offset */
		eoffset = soffset + size;
		if (soffset >= eoffset) {
			return -EINVAL;
		}

		last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
		if (last_pfn > rdev->vm_manager.max_pfn) {
			dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
				last_pfn, rdev->vm_manager.max_pfn);
			return -EINVAL;
		}

	} else {
		eoffset = last_pfn = 0;
	}

	mutex_lock(&vm->mutex);
	head = &vm->va;
	list_for_each_entry(tmp, &vm->va, vm_list) {
		if (bo_va == tmp) {
			/* skip over currently modified bo */
			continue;
		}

		if (soffset >= last_offset && eoffset <= tmp->soffset) {
			/* bo can be added before this one */
			break;
		}
		if (eoffset > tmp->soffset && soffset < tmp->eoffset) {
			/* bo and tmp overlap, invalid offset */
			dev_err(rdev->dev, "bo %p va 0x%08X conflict with (bo %p 0x%08X 0x%08X)\n",
				bo_va->bo, (unsigned)bo_va->soffset, tmp->bo,
				(unsigned)tmp->soffset, (unsigned)tmp->eoffset);
			mutex_unlock(&vm->mutex);
			return -EINVAL;
		}
		last_offset = tmp->eoffset;
		head = &tmp->vm_list;
	}

	bo_va->soffset = soffset;
	bo_va->eoffset = eoffset;
	bo_va->flags = flags;
	bo_va->valid = false;
	list_move(&bo_va->vm_list, head);

	soffset = (soffset / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;
	eoffset = (eoffset / RADEON_GPU_PAGE_SIZE) >> RADEON_VM_BLOCK_SIZE;

	if (eoffset > vm->max_pde_used)
		vm->max_pde_used = eoffset;

	radeon_bo_unreserve(bo_va->bo);

	/* walk over the address space and allocate the page tables */
	for (pt_idx = soffset; pt_idx <= eoffset; ++pt_idx) {
		struct radeon_bo *pt;

		if (vm->page_tables[pt_idx].bo)
			continue;

		/* drop mutex to allocate and clear page table */
		mutex_unlock(&vm->mutex);

		r = radeon_bo_create(rdev, RADEON_VM_PTE_COUNT * 8,
				     RADEON_GPU_PAGE_SIZE, false,
				     RADEON_GEM_DOMAIN_VRAM, NULL, &pt);
		if (r)
			return r;

		r = radeon_vm_clear_bo(rdev, pt);
		if (r) {
			radeon_bo_unref(&pt);
			radeon_bo_reserve(bo_va->bo, false);
			return r;
		}

		/* acquire mutex again */
		mutex_lock(&vm->mutex);
		if (vm->page_tables[pt_idx].bo) {
			/* someone else allocated the pt in the meantime */
			mutex_unlock(&vm->mutex);
			radeon_bo_unref(&pt);
			mutex_lock(&vm->mutex);
			continue;
		}

		vm->page_tables[pt_idx].addr = 0;
		vm->page_tables[pt_idx].bo = pt;
	}

	mutex_unlock(&vm->mutex);
	return radeon_bo_reserve(bo_va->bo, false);
}
/**
 * radeon_vm_map_gart - get the physical address of a gart page
 *
 * @rdev: radeon_device pointer
 * @addr: the unmapped addr
 *
 * Look up the physical address of the page that the pte resolves
 * to (cayman+).
 * Returns the physical address of the page.
 */
uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
{
	uint64_t result;

	/* page table offset */
	result = rdev->gart.pages_addr[addr >> PAGE_SHIFT];

	/* in case cpu page size != gpu page size */
	result |= addr & (~PAGE_MASK);

	return result;
}
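
/*
 * Worked example (assumed values): with 4KB CPU pages, an input of
 * addr = 0x11234 reads pages_addr[0x11] and keeps the low 12 bits,
 * so the returned address is pages_addr[0x11] | 0x234.
 */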
/**
 * radeon_vm_page_flags - translate page flags to what the hw uses
 *
 * @flags: flags coming from userspace
 *
 * Translate the flags the userspace ABI uses to hw flags.
 */
static uint32_t radeon_vm_page_flags(uint32_t flags)
{
	uint32_t hw_flags = 0;

	hw_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
	hw_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
	hw_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
	if (flags & RADEON_VM_PAGE_SYSTEM) {
		hw_flags |= R600_PTE_SYSTEM;
		hw_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
	}
	return hw_flags;
}
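
/*
 * Example translation (follows directly from the code above): a valid,
 * readable and writeable snooped system page, i.e. RADEON_VM_PAGE_VALID |
 * RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_WRITEABLE |
 * RADEON_VM_PAGE_SYSTEM | RADEON_VM_PAGE_SNOOPED, becomes
 * R600_PTE_VALID | R600_PTE_READABLE | R600_PTE_WRITEABLE |
 * R600_PTE_SYSTEM | R600_PTE_SNOOPED.
 */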
/**
 * radeon_vm_update_page_directory - make sure that page directory is valid
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Allocates new page tables if necessary
 * and updates the page directory (cayman+).
 * Returns 0 for success, error for failure.
 *
 * Global and local mutex must be locked!
 */
int radeon_vm_update_page_directory(struct radeon_device *rdev,
				    struct radeon_vm *vm)
{
	static const uint32_t incr = RADEON_VM_PTE_COUNT * 8;

	uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory);
	uint64_t last_pde = ~0, last_pt = ~0;
	unsigned count = 0, pt_idx, ndw;
	struct radeon_ib ib;
	int r;

	/* padding, etc. */
	ndw = 64;

	/* assume the worst case */
	ndw += vm->max_pde_used * 12;

	/* update too big for an IB */
	if (ndw > 0xfffff)
		return -ENOMEM;

	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
	if (r)
		return r;
	ib.length_dw = 0;

	/* walk over the address space and update the page directory */
	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
		struct radeon_bo *bo = vm->page_tables[pt_idx].bo;
		uint64_t pde, pt;

		if (bo == NULL)
			continue;

		pt = radeon_bo_gpu_offset(bo);
		if (vm->page_tables[pt_idx].addr == pt)
			continue;
		vm->page_tables[pt_idx].addr = pt;

		pde = pd_addr + pt_idx * 8;
		if (((last_pde + 8 * count) != pde) ||
		    ((last_pt + incr * count) != pt)) {

			if (count) {
				radeon_asic_vm_set_page(rdev, &ib, last_pde,
							last_pt, count, incr,
							R600_PTE_VALID);
			}

			count = 1;
			last_pde = pde;
			last_pt = pt;
		} else {
			++count;
		}
	}

	if (count)
		radeon_asic_vm_set_page(rdev, &ib, last_pde, last_pt, count,
					incr, R600_PTE_VALID);

	if (ib.length_dw != 0) {
		radeon_semaphore_sync_to(ib.semaphore, vm->last_id_use);
		r = radeon_ib_schedule(rdev, &ib, NULL);
		if (r) {
			radeon_ib_free(rdev, &ib);
			return r;
		}
		radeon_fence_unref(&vm->fence);
		vm->fence = radeon_fence_ref(ib.fence);
		radeon_fence_unref(&vm->last_flush);
	}
	radeon_ib_free(rdev, &ib);

	return 0;
}
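
/*
 * Illustrative note on the coalescing above (assumed layout): if four
 * page tables sit at contiguous GPU addresses pt, pt + incr, pt + 2*incr
 * and pt + 3*incr behind consecutive PDEs, the loop emits a single
 * radeon_asic_vm_set_page() call with count = 4 instead of four
 * single-entry writes.
 */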
/**
 * radeon_vm_update_ptes - make sure that page tables are valid
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 * @ib: indirect buffer to fill with commands
 * @start: start of GPU address range
 * @end: end of GPU address range
 * @dst: destination address to map to
 * @flags: mapping flags
 *
 * Update the page tables in the range @start - @end (cayman+).
 *
 * Global and local mutex must be locked!
 */
static void radeon_vm_update_ptes(struct radeon_device *rdev,
				  struct radeon_vm *vm,
				  struct radeon_ib *ib,
				  uint64_t start, uint64_t end,
				  uint64_t dst, uint32_t flags)
{
	static const uint64_t mask = RADEON_VM_PTE_COUNT - 1;

	uint64_t last_pte = ~0, last_dst = ~0;
	unsigned count = 0;
	uint64_t addr;

	start = start / RADEON_GPU_PAGE_SIZE;
	end = end / RADEON_GPU_PAGE_SIZE;

	/* walk over the address space and update the page tables */
	for (addr = start; addr < end; ) {
		uint64_t pt_idx = addr >> RADEON_VM_BLOCK_SIZE;
		unsigned nptes;
		uint64_t pte;

		if ((addr & ~mask) == (end & ~mask))
			nptes = end - addr;
		else
			nptes = RADEON_VM_PTE_COUNT - (addr & mask);

		pte = radeon_bo_gpu_offset(vm->page_tables[pt_idx].bo);
		pte += (addr & mask) * 8;

		if ((last_pte + 8 * count) != pte) {

			if (count) {
				radeon_asic_vm_set_page(rdev, ib, last_pte,
							last_dst, count,
							RADEON_GPU_PAGE_SIZE,
							flags);
			}

			count = nptes;
			last_pte = pte;
			last_dst = dst;
		} else {
			count += nptes;
		}

		addr += nptes;
		dst += nptes * RADEON_GPU_PAGE_SIZE;
	}

	if (count) {
		radeon_asic_vm_set_page(rdev, ib, last_pte,
					last_dst, count,
					RADEON_GPU_PAGE_SIZE, flags);
	}
}
/**
 * radeon_vm_bo_update - map a bo into the vm page table
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 * @bo: radeon buffer object
 * @mem: ttm mem
 *
 * Fill in the page table entries for @bo (cayman+).
 * Returns 0 for success, -EINVAL for failure.
 *
 * Object has to be reserved and mutex must be locked!
 */
int radeon_vm_bo_update(struct radeon_device *rdev,
			struct radeon_vm *vm,
			struct radeon_bo *bo,
			struct ttm_mem_reg *mem)
{
	struct radeon_ib ib;
	struct radeon_bo_va *bo_va;
	unsigned nptes, ndw;
	uint64_t addr;
	int r;

	bo_va = radeon_vm_bo_find(vm, bo);
	if (bo_va == NULL) {
		dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
		return -EINVAL;
	}

	if (!bo_va->soffset) {
		dev_err(rdev->dev, "bo %p doesn't have a mapping in vm %p\n",
			bo, vm);
		return -EINVAL;
	}

	if ((bo_va->valid && mem) || (!bo_va->valid && mem == NULL))
		return 0;

	bo_va->flags &= ~RADEON_VM_PAGE_VALID;
	bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
	if (mem) {
		addr = mem->start << PAGE_SHIFT;
		if (mem->mem_type != TTM_PL_SYSTEM) {
			bo_va->flags |= RADEON_VM_PAGE_VALID;
			bo_va->valid = true;
		}
		if (mem->mem_type == TTM_PL_TT) {
			bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
		} else {
			addr += rdev->vm_manager.vram_base_offset;
		}
	} else {
		addr = 0;
		bo_va->valid = false;
	}

	trace_radeon_vm_bo_update(bo_va);

	nptes = radeon_bo_ngpu_pages(bo);

	/* padding, etc. */
	ndw = 64;

	if (RADEON_VM_BLOCK_SIZE > 11)
		/* reserve space for one header for every 2k dwords */
		ndw += (nptes >> 11) * 4;
	else
		/* reserve space for one header for
		   every (1 << BLOCK_SIZE) entries */
		ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 4;

	/* reserve space for pte addresses */
	ndw += nptes * 2;

	/* update too big for an IB */
	if (ndw > 0xfffff)
		return -ENOMEM;

	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
	if (r)
		return r;
	ib.length_dw = 0;

	radeon_vm_update_ptes(rdev, vm, &ib, bo_va->soffset, bo_va->eoffset,
			      addr, radeon_vm_page_flags(bo_va->flags));

	radeon_semaphore_sync_to(ib.semaphore, vm->fence);
	r = radeon_ib_schedule(rdev, &ib, NULL);
	if (r) {
		radeon_ib_free(rdev, &ib);
		return r;
	}
	radeon_fence_unref(&vm->fence);
	vm->fence = radeon_fence_ref(ib.fence);
	radeon_ib_free(rdev, &ib);
	radeon_fence_unref(&vm->last_flush);

	return 0;
}
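
/*
 * Worked example for the ndw estimate above (assumed numbers): a 1MB BO
 * has nptes = 256 GPU pages of 4KB.  With a 9-bit block size that gives
 * ndw = 64 + (256 >> 9) * 4 + 256 * 2 = 576 dwords, i.e. a 2304 byte
 * indirect buffer, well below the 0xfffff limit.
 */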
/**
 * radeon_vm_bo_rmv - remove a bo from a specific vm
 *
 * @rdev: radeon_device pointer
 * @bo_va: requested bo_va
 *
 * Remove @bo_va->bo from the requested vm (cayman+).
 * Remove @bo_va->bo from the list of bos associated with the bo_va->vm and
 * remove the ptes for @bo_va in the page table.
 * Returns 0 for success.
 *
 * Object has to be reserved!
 */
int radeon_vm_bo_rmv(struct radeon_device *rdev,
		     struct radeon_bo_va *bo_va)
{
	int r = 0;

	mutex_lock(&bo_va->vm->mutex);
	if (bo_va->soffset)
		r = radeon_vm_bo_update(rdev, bo_va->vm, bo_va->bo, NULL);

	list_del(&bo_va->vm_list);
	mutex_unlock(&bo_va->vm->mutex);
	list_del(&bo_va->bo_list);

	kfree(bo_va);
	return r;
}
/**
 * radeon_vm_bo_invalidate - mark the bo as invalid
 *
 * @rdev: radeon_device pointer
 * @bo: radeon buffer object
 *
 * Mark @bo as invalid (cayman+).
 */
void radeon_vm_bo_invalidate(struct radeon_device *rdev,
			     struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		bo_va->valid = false;
	}
}
/**
 * radeon_vm_init - initialize a vm instance
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Init @vm fields (cayman+).
 */
int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
{
	unsigned pd_size, pd_entries, pts_size;
	int r;

	vm->id = 0;
	vm->fence = NULL;
	vm->last_flush = NULL;
	vm->last_id_use = NULL;
	mutex_init(&vm->mutex);
	INIT_LIST_HEAD(&vm->va);

	pd_size = radeon_vm_directory_size(rdev);
	pd_entries = radeon_vm_num_pdes(rdev);

	/* allocate page table array */
	pts_size = pd_entries * sizeof(struct radeon_vm_pt);
	vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
	if (vm->page_tables == NULL) {
		DRM_ERROR("Cannot allocate memory for page table array\n");
		return -ENOMEM;
	}

	r = radeon_bo_create(rdev, pd_size, RADEON_VM_PTB_ALIGN_SIZE, false,
			     RADEON_GEM_DOMAIN_VRAM, NULL,
			     &vm->page_directory);
	if (r)
		return r;

	r = radeon_vm_clear_bo(rdev, vm->page_directory);
	if (r) {
		radeon_bo_unref(&vm->page_directory);
		vm->page_directory = NULL;
		return r;
	}

	return 0;
}
/**
 * radeon_vm_fini - tear down a vm instance
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Tear down @vm (cayman+).
 * Unbind the VM and remove all bos from the vm bo list
 */
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
{
	struct radeon_bo_va *bo_va, *tmp;
	int i, r;

	if (!list_empty(&vm->va)) {
		dev_err(rdev->dev, "still active bo inside vm\n");
	}
	list_for_each_entry_safe(bo_va, tmp, &vm->va, vm_list) {
		list_del_init(&bo_va->vm_list);
		r = radeon_bo_reserve(bo_va->bo, false);
		if (!r) {
			list_del_init(&bo_va->bo_list);
			radeon_bo_unreserve(bo_va->bo);
			kfree(bo_va);
		}
	}
	for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
		radeon_bo_unref(&vm->page_tables[i].bo);
	kfree(vm->page_tables);

	radeon_bo_unref(&vm->page_directory);

	radeon_fence_unref(&vm->fence);
	radeon_fence_unref(&vm->last_flush);
	radeon_fence_unref(&vm->last_id_use);

	mutex_destroy(&vm->mutex);
}