/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */

#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * GPUVM is similar to the legacy gart on older asics, however
 * rather than there being a single global gart table
 * for the entire GPU, there are multiple VM page tables active
 * at any given time.  The VM page tables can contain a mix of
 * VRAM pages and system memory pages, and system memory pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID.  When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer.  VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time;
 * SI supports 16.
 */
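
/*
 * Address decomposition sketch (editorial illustration only, assuming the
 * 4KB GPU page size used throughout this file):
 *
 *	pfn     = gpu_addr / AMDGPU_GPU_PAGE_SIZE
 *	pd_idx  = pfn >> amdgpu_vm_block_size
 *	pte_idx = pfn & (AMDGPU_VM_PTE_COUNT - 1)
 *
 * pd_idx selects the page directory entry (and thus the page table BO),
 * pte_idx the entry within that page table; these match the shifts and
 * masks used by amdgpu_vm_update_ptes() and amdgpu_vm_bo_map() below.
 */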

/**
 * amdgpu_vm_num_pdes - return the number of page directory entries
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate the number of page directory entries (cayman+).
 */
static unsigned amdgpu_vm_num_pdes(struct amdgpu_device *adev)
{
	return adev->vm_manager.max_pfn >> amdgpu_vm_block_size;
}

/**
 * amdgpu_vm_directory_size - returns the size of the page directory in bytes
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate the size of the page directory in bytes (cayman+).
 */
static unsigned amdgpu_vm_directory_size(struct amdgpu_device *adev)
{
	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_pdes(adev) * 8);
}
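
/*
 * Worked example (illustrative only, values assumed): with a block size of
 * 9 bits (AMDGPU_VM_PTE_COUNT = 512) and vm_manager.max_pfn = 1 << 20
 * (4GB of 4KB GPU pages):
 *
 *	amdgpu_vm_num_pdes()       = (1 << 20) >> 9 = 2048
 *	amdgpu_vm_directory_size() = 2048 PDEs * 8 bytes = 16KB
 */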

/**
 * amdgpu_vm_get_pd_bo - add the VM PD to a validation list
 *
 * @vm: vm providing the BOs
 * @validated: head of validation list
 * @entry: entry to add
 *
 * Add the page directory to the list of BOs to
 * validate for command submission.
 */
void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
			 struct list_head *validated,
			 struct amdgpu_bo_list_entry *entry)
{
	entry->robj = vm->page_directory;
	entry->prefered_domains = AMDGPU_GEM_DOMAIN_VRAM;
	entry->allowed_domains = AMDGPU_GEM_DOMAIN_VRAM;
	entry->priority = 0;
	entry->tv.bo = &vm->page_directory->tbo;
	entry->tv.shared = true;
	list_add(&entry->tv.head, validated);
}

/**
 * amdgpu_vm_get_pt_bos - add the vm page table BOs to a validation list
 *
 * @vm: vm providing the BOs
 * @duplicates: head of duplicates list
 *
 * Add the page tables to the list of BOs to
 * validate for command submission (cayman+).
 */
struct amdgpu_bo_list_entry *amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm,
						  struct list_head *duplicates)
{
	struct amdgpu_bo_list_entry *list;
	unsigned i, idx;

	list = drm_malloc_ab(vm->max_pde_used + 1,
			     sizeof(struct amdgpu_bo_list_entry));
	if (!list)
		return NULL;

	/* add the vm page table to the list */
	for (i = 0, idx = 0; i <= vm->max_pde_used; i++) {
		if (!vm->page_tables[i].bo)
			continue;

		list[idx].robj = vm->page_tables[i].bo;
		list[idx].prefered_domains = AMDGPU_GEM_DOMAIN_VRAM;
		list[idx].allowed_domains = AMDGPU_GEM_DOMAIN_VRAM;
		list[idx].priority = 0;
		list[idx].tv.bo = &list[idx].robj->tbo;
		list[idx].tv.shared = true;
		list_add(&list[idx++].tv.head, duplicates);
	}

	return list;
}
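
/*
 * Typical usage during command submission (sketch only, not upstream code;
 * the local variable names are assumptions):
 *
 *	struct amdgpu_bo_list_entry pd;
 *	struct amdgpu_bo_list_entry *pt_bos;
 *	struct list_head validated, duplicates;
 *
 *	INIT_LIST_HEAD(&validated);
 *	INIT_LIST_HEAD(&duplicates);
 *	amdgpu_vm_get_pd_bo(vm, &validated, &pd);
 *	pt_bos = amdgpu_vm_get_pt_bos(vm, &duplicates);
 *	... reserve and validate both lists, submit, then ...
 *	drm_free_large(pt_bos);
 */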

/**
 * amdgpu_vm_grab_id - allocate the next free VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @sync: sync object where we add dependencies
 *
 * Allocate an id for the vm, adding fences to the sync obj as necessary.
 *
 * Global mutex must be locked!
 */
int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		      struct amdgpu_sync *sync)
{
	struct fence *best[AMDGPU_MAX_RINGS] = {};
	struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
	struct amdgpu_device *adev = ring->adev;

	unsigned choices[2] = {};
	unsigned i;

	/* check if the id is still valid */
	if (vm_id->id) {
		unsigned id = vm_id->id;
		long owner;

		owner = atomic_long_read(&adev->vm_manager.ids[id].owner);
		if (owner == (long)vm) {
			trace_amdgpu_vm_grab_id(vm_id->id, ring->idx);
			return 0;
		}
	}

	/* we definitely need to flush */
	vm_id->pd_gpu_addr = ~0ll;

	/* skip over VMID 0, since it is the system VM */
	for (i = 1; i < adev->vm_manager.nvm; ++i) {
		struct fence *fence = adev->vm_manager.ids[i].active;
		struct amdgpu_ring *fring;

		if (fence == NULL) {
			/* found a free one */
			vm_id->id = i;
			trace_amdgpu_vm_grab_id(i, ring->idx);
			return 0;
		}

		fring = amdgpu_ring_from_fence(fence);
		if (best[fring->idx] == NULL ||
		    fence_is_later(best[fring->idx], fence)) {
			best[fring->idx] = fence;
			choices[fring == ring ? 0 : 1] = i;
		}
	}

	for (i = 0; i < 2; ++i) {
		if (choices[i]) {
			struct fence *fence;

			fence = adev->vm_manager.ids[choices[i]].active;
			vm_id->id = choices[i];

			trace_amdgpu_vm_grab_id(choices[i], ring->idx);
			return amdgpu_sync_fence(ring->adev, sync, fence);
		}
	}

	/* should never happen */
	BUG();
	return -EINVAL;
}
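
/*
 * Editorial note on the selection above: choices[0] holds a VMID whose last
 * use was on the submitting ring, choices[1] one last used on a foreign
 * ring.  Reusing a same-ring VMID is preferred because the dependency added
 * by amdgpu_sync_fence() is then satisfied by ring order alone, without
 * cross-ring synchronization.
 */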

/**
 * amdgpu_vm_flush - hardware flush the vm
 *
 * @ring: ring to use for flush
 * @vm: vm we want to flush
 * @updates: last vm update that we waited for
 *
 * Flush the vm (cayman+).
 *
 * Global and local mutex must be locked!
 */
void amdgpu_vm_flush(struct amdgpu_ring *ring,
		     struct amdgpu_vm *vm,
		     struct fence *updates)
{
	uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
	struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
	struct fence *flushed_updates = vm_id->flushed_updates;
	bool is_later;

	if (!flushed_updates)
		is_later = true;
	else
		is_later = fence_is_later(updates, flushed_updates);

	if (pd_addr != vm_id->pd_gpu_addr || is_later) {
		trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id);
		if (is_later) {
			vm_id->flushed_updates = fence_get(updates);
			fence_put(flushed_updates);
		}
		vm_id->pd_gpu_addr = pd_addr;
		amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr);
	}
}

/**
 * amdgpu_vm_fence - remember fence for vm
 *
 * @adev: amdgpu_device pointer
 * @vm: vm we want to fence
 * @fence: fence to remember
 *
 * Fence the vm (cayman+).
 * Set the fence used to protect page table and id.
 *
 * Global and local mutex must be locked!
 */
void amdgpu_vm_fence(struct amdgpu_device *adev,
		     struct amdgpu_vm *vm,
		     struct fence *fence)
{
	struct amdgpu_ring *ring = amdgpu_ring_from_fence(fence);
	unsigned vm_id = vm->ids[ring->idx].id;

	fence_put(adev->vm_manager.ids[vm_id].active);
	adev->vm_manager.ids[vm_id].active = fence_get(fence);
	atomic_long_set(&adev->vm_manager.ids[vm_id].owner, (long)vm);
}

/**
 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
 *
 * @vm: requested vm
 * @bo: requested buffer object
 *
 * Find @bo inside the requested vm (cayman+).
 * Search inside the @bo's vm list for the requested vm.
 * Returns the found bo_va or NULL if none is found.
 *
 * Object has to be reserved!
 */
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		if (bo_va->vm == vm) {
			return bo_va;
		}
	}
	return NULL;
}

/**
 * amdgpu_vm_update_pages - helper to call the right asic function
 *
 * @adev: amdgpu_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 * @gtt_flags: GTT hw access flags
 *
 * Traces the parameters and calls the right asic functions
 * to setup the page table using the DMA.
 */
static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
				   struct amdgpu_ib *ib,
				   uint64_t pe, uint64_t addr,
				   unsigned count, uint32_t incr,
				   uint32_t flags, uint32_t gtt_flags)
{
	trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);

	if ((flags & AMDGPU_PTE_SYSTEM) && (flags == gtt_flags)) {
		uint64_t src = adev->gart.table_addr + (addr >> 12) * 8;
		amdgpu_vm_copy_pte(adev, ib, pe, src, count);

	} else if ((flags & AMDGPU_PTE_SYSTEM) || (count < 3)) {
		amdgpu_vm_write_pte(adev, ib, pe, addr,
				    count, incr, flags);

	} else {
		amdgpu_vm_set_pte_pde(adev, ib, pe, addr,
				      count, incr, flags);
	}
}

int amdgpu_vm_free_job(struct amdgpu_job *job)
{
	int i;

	for (i = 0; i < job->num_ibs; i++)
		amdgpu_ib_free(job->adev, &job->ibs[i]);
	kfree(job->ibs);
	return 0;
}

/**
 * amdgpu_vm_clear_bo - initially clear the page dir/table
 *
 * @adev: amdgpu_device pointer
 * @bo: bo to clear
 *
 * Need to reserve bo first before calling it.
 */
static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
			      struct amdgpu_bo *bo)
{
	struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
	struct fence *fence = NULL;
	struct amdgpu_ib *ib;
	unsigned entries;
	uint64_t addr;
	int r;

	r = reservation_object_reserve_shared(bo->tbo.resv);
	if (r)
		return r;

	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
	if (r)
		goto error;

	addr = amdgpu_bo_gpu_offset(bo);
	entries = amdgpu_bo_size(bo) / 8;

	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
	if (!ib) {
		r = -ENOMEM;
		goto error;
	}

	r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, ib);
	if (r)
		goto error_free;

	ib->length_dw = 0;

	amdgpu_vm_update_pages(adev, ib, addr, 0, entries, 0, 0, 0);
	amdgpu_vm_pad_ib(adev, ib);
	WARN_ON(ib->length_dw > 64);
	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
						 &amdgpu_vm_free_job,
						 AMDGPU_FENCE_OWNER_VM,
						 &fence);
	if (!r)
		amdgpu_bo_fence(bo, fence, true);
	fence_put(fence);
	if (amdgpu_enable_scheduler)
		return 0;

error_free:
	amdgpu_ib_free(adev, ib);
	kfree(ib);

error:
	return r;
}

/**
 * amdgpu_vm_map_gart - get the physical address of a gart page
 *
 * @adev: amdgpu_device pointer
 * @addr: the unmapped addr
 *
 * Look up the physical address of the page that the pte resolves
 * to (cayman+).
 * Returns the physical address of the page.
 */
uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr)
{
	uint64_t result;

	/* page table offset */
	result = adev->gart.pages_addr[addr >> PAGE_SHIFT];

	/* in case cpu page size != gpu page size */
	result |= addr & (~PAGE_MASK);

	return result;
}
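
/*
 * Example (illustrative): with 4KB CPU pages, addr = 0x12345 resolves to
 * pages_addr[0x12] (the DMA address of that system page) OR'ed with the
 * in-page offset 0x345.
 */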

/**
 * amdgpu_vm_update_page_directory - make sure that page directory is valid
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Allocates new page tables if necessary
 * and updates the page directory (cayman+).
 * Returns 0 for success, error for failure.
 *
 * Global and local mutex must be locked!
 */
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm)
{
	struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
	struct amdgpu_bo *pd = vm->page_directory;
	uint64_t pd_addr = amdgpu_bo_gpu_offset(pd);
	uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
	uint64_t last_pde = ~0, last_pt = ~0;
	unsigned count = 0, pt_idx, ndw;
	struct amdgpu_ib *ib;
	struct fence *fence = NULL;
	int r;

	/* padding, etc. */
	ndw = 64;

	/* assume the worst case */
	ndw += vm->max_pde_used * 6;

	/* update too big for an IB */
	if (ndw > 0xfffff)
		return -ENOMEM;

	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
	if (!ib)
		return -ENOMEM;

	r = amdgpu_ib_get(ring, NULL, ndw * 4, ib);
	if (r) {
		kfree(ib);
		return r;
	}
	ib->length_dw = 0;

	/* walk over the address space and update the page directory */
	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
		struct amdgpu_bo *bo = vm->page_tables[pt_idx].bo;
		uint64_t pde, pt;

		if (bo == NULL)
			continue;

		pt = amdgpu_bo_gpu_offset(bo);
		if (vm->page_tables[pt_idx].addr == pt)
			continue;
		vm->page_tables[pt_idx].addr = pt;

		pde = pd_addr + pt_idx * 8;
		if (((last_pde + 8 * count) != pde) ||
		    ((last_pt + incr * count) != pt)) {

			if (count) {
				amdgpu_vm_update_pages(adev, ib, last_pde,
						       last_pt, count, incr,
						       AMDGPU_PTE_VALID, 0);
			}

			count = 1;
			last_pde = pde;
			last_pt = pt;
		} else {
			++count;
		}
	}

	if (count)
		amdgpu_vm_update_pages(adev, ib, last_pde, last_pt, count,
				       incr, AMDGPU_PTE_VALID, 0);

	if (ib->length_dw != 0) {
		amdgpu_vm_pad_ib(adev, ib);
		amdgpu_sync_resv(adev, &ib->sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM);
		WARN_ON(ib->length_dw > ndw);
		r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
							 &amdgpu_vm_free_job,
							 AMDGPU_FENCE_OWNER_VM,
							 &fence);
		if (r)
			goto error_free;

		amdgpu_bo_fence(pd, fence, true);
		fence_put(vm->page_directory_fence);
		vm->page_directory_fence = fence_get(fence);
		fence_put(fence);
	}

	if (!amdgpu_enable_scheduler || ib->length_dw == 0) {
		amdgpu_ib_free(adev, ib);
		kfree(ib);
	}

	return 0;

error_free:
	amdgpu_ib_free(adev, ib);
	kfree(ib);
	return r;
}

/**
 * amdgpu_vm_frag_ptes - add fragment information to PTEs
 *
 * @adev: amdgpu_device pointer
 * @ib: IB for the update
 * @pe_start: first PTE to handle
 * @pe_end: last PTE to handle
 * @addr: addr those PTEs should point to
 * @flags: hw mapping flags
 * @gtt_flags: GTT hw mapping flags
 *
 * Global and local mutex must be locked!
 */
static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
				struct amdgpu_ib *ib,
				uint64_t pe_start, uint64_t pe_end,
				uint64_t addr, uint32_t flags,
				uint32_t gtt_flags)
{
	/*
	 * The MC L1 TLB supports variable sized pages, based on a fragment
	 * field in the PTE. When this field is set to a non-zero value, page
	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
	 * flags are considered valid for all PTEs within the fragment range
	 * and corresponding mappings are assumed to be physically contiguous.
	 *
	 * The L1 TLB can store a single PTE for the whole fragment,
	 * significantly increasing the space available for translation
	 * caching. This leads to large improvements in throughput when the
	 * TLB is under pressure.
	 *
	 * The L2 TLB distributes small and large fragments into two
	 * asymmetric partitions. The large fragment cache is significantly
	 * larger. Thus, we try to use large fragments wherever possible.
	 * Userspace can support this by aligning virtual base address and
	 * allocation size to the fragment size.
	 */
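
	/*
	 * Worked example (editorial illustration): frag_align is 0x80 bytes,
	 * i.e. 16 PTEs covering 64KB.  For pe_start = 0x108 and pe_end = 0x2f8:
	 *
	 *	frag_start = ALIGN(0x108, 0x80) = 0x180
	 *	frag_end   = 0x2f8 & ~0x7f      = 0x280
	 *
	 * giving 15 leading 4KB PTEs, 32 PTEs written with the 64KB fragment
	 * flag, and 15 trailing 4KB PTEs.
	 */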

	/* SI and newer are optimized for 64KB */
	uint64_t frag_flags = AMDGPU_PTE_FRAG_64KB;
	uint64_t frag_align = 0x80;

	uint64_t frag_start = ALIGN(pe_start, frag_align);
	uint64_t frag_end = pe_end & ~(frag_align - 1);

	unsigned count;

	/* system pages are non-contiguous */
	if ((flags & AMDGPU_PTE_SYSTEM) || !(flags & AMDGPU_PTE_VALID) ||
	    (frag_start >= frag_end)) {

		count = (pe_end - pe_start) / 8;
		amdgpu_vm_update_pages(adev, ib, pe_start, addr, count,
				       AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags);
		return;
	}

	/* handle the 4K area at the beginning */
	if (pe_start != frag_start) {
		count = (frag_start - pe_start) / 8;
		amdgpu_vm_update_pages(adev, ib, pe_start, addr, count,
				       AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags);
		addr += AMDGPU_GPU_PAGE_SIZE * count;
	}

	/* handle the area in the middle */
	count = (frag_end - frag_start) / 8;
	amdgpu_vm_update_pages(adev, ib, frag_start, addr, count,
			       AMDGPU_GPU_PAGE_SIZE, flags | frag_flags,
			       gtt_flags);

	/* handle the 4K area at the end */
	if (frag_end != pe_end) {
		addr += AMDGPU_GPU_PAGE_SIZE * count;
		count = (pe_end - frag_end) / 8;
		amdgpu_vm_update_pages(adev, ib, frag_end, addr, count,
				       AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags);
	}
}

/**
 * amdgpu_vm_update_ptes - make sure that page tables are valid
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @start: start of GPU address range
 * @end: end of GPU address range
 * @dst: destination address to map to
 * @flags: mapping flags
 *
 * Update the page tables in the range @start - @end (cayman+).
 *
 * Global and local mutex must be locked!
 */
static int amdgpu_vm_update_ptes(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm,
				 struct amdgpu_ib *ib,
				 uint64_t start, uint64_t end,
				 uint64_t dst, uint32_t flags,
				 uint32_t gtt_flags)
{
	uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
	uint64_t last_pte = ~0, last_dst = ~0;
	void *owner = AMDGPU_FENCE_OWNER_VM;
	unsigned count = 0;
	uint64_t addr;

	/* sync to everything on unmapping */
	if (!(flags & AMDGPU_PTE_VALID))
		owner = AMDGPU_FENCE_OWNER_UNDEFINED;

	/* walk over the address space and update the page tables */
	for (addr = start; addr < end; ) {
		uint64_t pt_idx = addr >> amdgpu_vm_block_size;
		struct amdgpu_bo *pt = vm->page_tables[pt_idx].bo;
		unsigned nptes;
		uint64_t pte;
		int r;

		amdgpu_sync_resv(adev, &ib->sync, pt->tbo.resv, owner);
		r = reservation_object_reserve_shared(pt->tbo.resv);
		if (r)
			return r;

		if ((addr & ~mask) == (end & ~mask))
			nptes = end - addr;
		else
			nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);

		pte = amdgpu_bo_gpu_offset(pt);
		pte += (addr & mask) * 8;

		if ((last_pte + 8 * count) != pte) {

			if (count) {
				amdgpu_vm_frag_ptes(adev, ib, last_pte,
						    last_pte + 8 * count,
						    last_dst, flags,
						    gtt_flags);
			}

			count = nptes;
			last_pte = pte;
			last_dst = dst;
		} else {
			count += nptes;
		}

		addr += nptes;
		dst += nptes * AMDGPU_GPU_PAGE_SIZE;
	}

	if (count) {
		amdgpu_vm_frag_ptes(adev, ib, last_pte,
				    last_pte + 8 * count,
				    last_dst, flags, gtt_flags);
	}

	return 0;
}
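
/*
 * Walking example (editorial illustration, assuming a 9 bit block size,
 * i.e. 512 PTEs per table): updating GPU pages 500..1300 takes three
 * iterations of the loop above:
 *
 *	addr =  500: pt_idx = 0, nptes = 512 - (500 & 511) = 12
 *	addr =  512: pt_idx = 1, nptes = 512
 *	addr = 1024: pt_idx = 2, nptes = 1300 - 1024 = 276 (last table)
 *
 * Contiguous runs are merged and handed to amdgpu_vm_frag_ptes() in one go
 * via the count/last_pte bookkeeping.
 */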

/**
 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @mapping: mapped range and flags to use for the update
 * @addr: addr to set the area to
 * @gtt_flags: flags as they are used for GTT
 * @fence: optional resulting fence
 *
 * Fill in the page table entries for @mapping.
 * Returns 0 for success, -EINVAL for failure.
 *
 * Objects have to be reserved and mutex must be locked!
 */
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
				       struct amdgpu_vm *vm,
				       struct amdgpu_bo_va_mapping *mapping,
				       uint64_t addr, uint32_t gtt_flags,
				       struct fence **fence)
{
	struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
	unsigned nptes, ncmds, ndw;
	uint32_t flags = gtt_flags;
	struct amdgpu_ib *ib;
	struct fence *f = NULL;
	int r;

	/* Normally bo_va->flags only contains the READABLE and WRITEABLE
	 * bits, but to be safe we filter the flags here first.
	 */
	if (!(mapping->flags & AMDGPU_PTE_READABLE))
		flags &= ~AMDGPU_PTE_READABLE;
	if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
		flags &= ~AMDGPU_PTE_WRITEABLE;

	trace_amdgpu_vm_bo_update(mapping);

	nptes = mapping->it.last - mapping->it.start + 1;

	/*
	 * reserve space for one command every (1 << BLOCK_SIZE)
	 * entries or 2k dwords (whatever is smaller)
	 */
	ncmds = (nptes >> min(amdgpu_vm_block_size, 11)) + 1;
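
	/*
	 * Example (editorial illustration): mapping 1024 pages with a 9 bit
	 * block size needs ncmds = (1024 >> 9) + 1 = 3 update commands,
	 * since each command can touch at most one page table.
	 */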

	/* padding, etc. */
	ndw = 64;

	if ((flags & AMDGPU_PTE_SYSTEM) && (flags == gtt_flags)) {
		/* only copy commands needed */
		ndw += ncmds * 7;

	} else if (flags & AMDGPU_PTE_SYSTEM) {
		/* header for write data commands */
		ndw += ncmds * 4;

		/* body of write data command */
		ndw += nptes * 2;

	} else {
		/* set page commands needed */
		ndw += ncmds * 10;

		/* two extra commands for begin/end of fragment */
		ndw += 2 * 10;
	}

	/* update too big for an IB */
	if (ndw > 0xfffff)
		return -ENOMEM;

	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
	if (!ib)
		return -ENOMEM;

	r = amdgpu_ib_get(ring, NULL, ndw * 4, ib);
	if (r) {
		kfree(ib);
		return r;
	}

	ib->length_dw = 0;

	r = amdgpu_vm_update_ptes(adev, vm, ib, mapping->it.start,
				  mapping->it.last + 1, addr + mapping->offset,
				  flags, gtt_flags);
	if (r) {
		amdgpu_ib_free(adev, ib);
		kfree(ib);
		return r;
	}

	amdgpu_vm_pad_ib(adev, ib);
	WARN_ON(ib->length_dw > ndw);
	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
						 &amdgpu_vm_free_job,
						 AMDGPU_FENCE_OWNER_VM,
						 &f);
	if (r)
		goto error_free;

	amdgpu_bo_fence(vm->page_directory, f, true);
	if (fence) {
		fence_put(*fence);
		*fence = fence_get(f);
	}
	fence_put(f);
	if (!amdgpu_enable_scheduler) {
		amdgpu_ib_free(adev, ib);
		kfree(ib);
	}
	return 0;

error_free:
	amdgpu_ib_free(adev, ib);
	kfree(ib);
	return r;
}

/**
 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested BO and VM object
 * @mem: ttm mem
 *
 * Fill in the page table entries for @bo_va.
 * Returns 0 for success, -EINVAL for failure.
 *
 * Objects have to be reserved and mutex must be locked!
 */
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			struct ttm_mem_reg *mem)
{
	struct amdgpu_vm *vm = bo_va->vm;
	struct amdgpu_bo_va_mapping *mapping;
	uint32_t flags;
	uint64_t addr;
	int r;

	if (mem) {
		addr = (u64)mem->start << PAGE_SHIFT;
		if (mem->mem_type != TTM_PL_TT)
			addr += adev->vm_manager.vram_base_offset;
	} else {
		addr = 0;
	}

	flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);

	spin_lock(&vm->status_lock);
	if (!list_empty(&bo_va->vm_status))
		list_splice_init(&bo_va->valids, &bo_va->invalids);
	spin_unlock(&vm->status_lock);

	list_for_each_entry(mapping, &bo_va->invalids, list) {
		r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, addr,
						flags, &bo_va->last_pt_update);
		if (r)
			return r;
	}

	if (trace_amdgpu_vm_bo_mapping_enabled()) {
		list_for_each_entry(mapping, &bo_va->valids, list)
			trace_amdgpu_vm_bo_mapping(mapping);

		list_for_each_entry(mapping, &bo_va->invalids, list)
			trace_amdgpu_vm_bo_mapping(mapping);
	}

	spin_lock(&vm->status_lock);
	list_splice_init(&bo_va->invalids, &bo_va->valids);
	list_del_init(&bo_va->vm_status);
	if (!mem)
		list_add(&bo_va->vm_status, &vm->cleared);
	spin_unlock(&vm->status_lock);

	return 0;
}

/**
 * amdgpu_vm_clear_freed - clear freed BOs in the PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Make sure all freed BOs are cleared in the PT.
 * Returns 0 for success.
 *
 * PTs have to be reserved and mutex must be locked!
 */
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm)
{
	struct amdgpu_bo_va_mapping *mapping;
	int r;

	spin_lock(&vm->freed_lock);
	while (!list_empty(&vm->freed)) {
		mapping = list_first_entry(&vm->freed,
			struct amdgpu_bo_va_mapping, list);
		list_del(&mapping->list);
		spin_unlock(&vm->freed_lock);
		r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, 0, 0, NULL);
		kfree(mapping);
		if (r)
			return r;

		spin_lock(&vm->freed_lock);
	}
	spin_unlock(&vm->freed_lock);

	return 0;
}

/**
 * amdgpu_vm_clear_invalids - clear invalidated BOs in the PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @sync: sync object to add fences to
 *
 * Make sure all invalidated BOs are cleared in the PT.
 * Returns 0 for success.
 *
 * PTs have to be reserved and mutex must be locked!
 */
int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
			     struct amdgpu_vm *vm, struct amdgpu_sync *sync)
{
	struct amdgpu_bo_va *bo_va = NULL;
	int r = 0;

	spin_lock(&vm->status_lock);
	while (!list_empty(&vm->invalidated)) {
		bo_va = list_first_entry(&vm->invalidated,
			struct amdgpu_bo_va, vm_status);
		spin_unlock(&vm->status_lock);
		mutex_lock(&bo_va->mutex);
		r = amdgpu_vm_bo_update(adev, bo_va, NULL);
		mutex_unlock(&bo_va->mutex);
		if (r)
			return r;

		spin_lock(&vm->status_lock);
	}
	spin_unlock(&vm->status_lock);

	if (bo_va)
		r = amdgpu_sync_fence(adev, sync, bo_va->last_pt_update);

	return r;
}

/**
 * amdgpu_vm_bo_add - add a bo to a specific vm
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @bo: amdgpu buffer object
 *
 * Add @bo into the requested vm (cayman+).
 * Add @bo to the list of bos associated with the vm.
 * Returns newly added bo_va or NULL for failure.
 *
 * Object has to be reserved!
 */
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
	if (bo_va == NULL) {
		return NULL;
	}

	bo_va->vm = vm;
	bo_va->bo = bo;
	bo_va->ref_count = 1;
	INIT_LIST_HEAD(&bo_va->bo_list);
	INIT_LIST_HEAD(&bo_va->valids);
	INIT_LIST_HEAD(&bo_va->invalids);
	INIT_LIST_HEAD(&bo_va->vm_status);
	mutex_init(&bo_va->mutex);
	list_add_tail(&bo_va->bo_list, &bo->va);

	return bo_va;
}

/**
 * amdgpu_vm_bo_map - map bo inside a vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to store the address
 * @saddr: where to map the BO
 * @offset: requested offset in the BO
 * @size: size of the mapping in bytes
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Add a mapping of the BO at the specified addr into the VM.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t saddr, uint64_t offset,
		     uint64_t size, uint32_t flags)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_vm *vm = bo_va->vm;
	struct interval_tree_node *it;
	unsigned last_pfn, pt_idx;
	uint64_t eaddr;
	int r;

	/* validate the parameters */
	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
		return -EINVAL;

	/* make sure object fit at this offset */
	eaddr = saddr + size;
	if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo)))
		return -EINVAL;

	last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
	if (last_pfn > adev->vm_manager.max_pfn) {
		dev_err(adev->dev, "va above limit (0x%08X > 0x%08X)\n",
			last_pfn, adev->vm_manager.max_pfn);
		return -EINVAL;
	}

	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	spin_lock(&vm->it_lock);
	it = interval_tree_iter_first(&vm->va, saddr, eaddr - 1);
	spin_unlock(&vm->it_lock);
	if (it) {
		struct amdgpu_bo_va_mapping *tmp;
		tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
		/* bo and tmp overlap, invalid addr */
		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
			"0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr,
			tmp->it.start, tmp->it.last + 1);
		r = -EINVAL;
		goto error;
	}

	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping) {
		r = -ENOMEM;
		goto error;
	}

	INIT_LIST_HEAD(&mapping->list);
	mapping->it.start = saddr;
	mapping->it.last = eaddr - 1;
	mapping->offset = offset;
	mapping->flags = flags;

	mutex_lock(&bo_va->mutex);
	list_add(&mapping->list, &bo_va->invalids);
	mutex_unlock(&bo_va->mutex);
	spin_lock(&vm->it_lock);
	interval_tree_insert(&mapping->it, &vm->va);
	spin_unlock(&vm->it_lock);
	trace_amdgpu_vm_bo_map(bo_va, mapping);

	/* Make sure the page tables are allocated */
	saddr >>= amdgpu_vm_block_size;
	eaddr >>= amdgpu_vm_block_size;

	BUG_ON(eaddr >= amdgpu_vm_num_pdes(adev));

	if (eaddr > vm->max_pde_used)
		vm->max_pde_used = eaddr;

	/* walk over the address space and allocate the page tables */
	for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
		struct reservation_object *resv = vm->page_directory->tbo.resv;
		struct amdgpu_bo *pt;

		if (vm->page_tables[pt_idx].bo)
			continue;

		r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
				     AMDGPU_GPU_PAGE_SIZE, true,
				     AMDGPU_GEM_DOMAIN_VRAM,
				     AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
				     NULL, resv, &pt);
		if (r)
			goto error_free;

		r = amdgpu_vm_clear_bo(adev, pt);
		if (r) {
			amdgpu_bo_unref(&pt);
			goto error_free;
		}

		vm->page_tables[pt_idx].addr = 0;
		vm->page_tables[pt_idx].bo = pt;
	}

	return 0;

error_free:
	list_del(&mapping->list);
	spin_lock(&vm->it_lock);
	interval_tree_remove(&mapping->it, &vm->va);
	spin_unlock(&vm->it_lock);
	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
	kfree(mapping);

error:
	return r;
}

/**
 * amdgpu_vm_bo_unmap - remove bo mapping from vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to remove the address from
 * @saddr: where the BO is mapped
 *
 * Remove a mapping of the BO at the specified addr from the VM.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved and unreserved outside!
 */
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t saddr)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_vm *vm = bo_va->vm;
	bool valid = true;

	saddr /= AMDGPU_GPU_PAGE_SIZE;
	mutex_lock(&bo_va->mutex);
	list_for_each_entry(mapping, &bo_va->valids, list) {
		if (mapping->it.start == saddr)
			break;
	}

	if (&mapping->list == &bo_va->valids) {
		valid = false;

		list_for_each_entry(mapping, &bo_va->invalids, list) {
			if (mapping->it.start == saddr)
				break;
		}

		if (&mapping->list == &bo_va->invalids) {
			mutex_unlock(&bo_va->mutex);
			return -ENOENT;
		}
	}
	mutex_unlock(&bo_va->mutex);
	list_del(&mapping->list);
	spin_lock(&vm->it_lock);
	interval_tree_remove(&mapping->it, &vm->va);
	spin_unlock(&vm->it_lock);
	trace_amdgpu_vm_bo_unmap(bo_va, mapping);

	if (valid) {
		spin_lock(&vm->freed_lock);
		list_add(&mapping->list, &vm->freed);
		spin_unlock(&vm->freed_lock);
	} else {
		kfree(mapping);
	}

	return 0;
}

/**
 * amdgpu_vm_bo_rmv - remove a bo from a specific vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested bo_va
 *
 * Remove @bo_va->bo from the requested vm (cayman+).
 *
 * Object has to be reserved!
 */
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va)
{
	struct amdgpu_bo_va_mapping *mapping, *next;
	struct amdgpu_vm *vm = bo_va->vm;

	list_del(&bo_va->bo_list);

	spin_lock(&vm->status_lock);
	list_del(&bo_va->vm_status);
	spin_unlock(&vm->status_lock);

	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
		list_del(&mapping->list);
		spin_lock(&vm->it_lock);
		interval_tree_remove(&mapping->it, &vm->va);
		spin_unlock(&vm->it_lock);
		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
		spin_lock(&vm->freed_lock);
		list_add(&mapping->list, &vm->freed);
		spin_unlock(&vm->freed_lock);
	}
	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
		list_del(&mapping->list);
		spin_lock(&vm->it_lock);
		interval_tree_remove(&mapping->it, &vm->va);
		spin_unlock(&vm->it_lock);
		kfree(mapping);
	}

	fence_put(bo_va->last_pt_update);
	mutex_destroy(&bo_va->mutex);
	kfree(bo_va);
}

/**
 * amdgpu_vm_bo_invalidate - mark the bo as invalid
 *
 * @adev: amdgpu_device pointer
 * @bo: amdgpu buffer object
 *
 * Mark @bo as invalid (cayman+).
 */
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		spin_lock(&bo_va->vm->status_lock);
		if (list_empty(&bo_va->vm_status))
			list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
		spin_unlock(&bo_va->vm->status_lock);
	}
}

/**
 * amdgpu_vm_init - initialize a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Init @vm fields (cayman+).
 */
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
				   AMDGPU_VM_PTE_COUNT * 8);
	unsigned pd_size, pd_entries, pts_size;
	int i, r;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		vm->ids[i].id = 0;
		vm->ids[i].flushed_updates = NULL;
	}
	vm->va = RB_ROOT;
	spin_lock_init(&vm->status_lock);
	INIT_LIST_HEAD(&vm->invalidated);
	INIT_LIST_HEAD(&vm->cleared);
	INIT_LIST_HEAD(&vm->freed);
	spin_lock_init(&vm->it_lock);
	spin_lock_init(&vm->freed_lock);
	pd_size = amdgpu_vm_directory_size(adev);
	pd_entries = amdgpu_vm_num_pdes(adev);

	/* allocate page table array */
	pts_size = pd_entries * sizeof(struct amdgpu_vm_pt);
	vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
	if (vm->page_tables == NULL) {
		DRM_ERROR("Cannot allocate memory for page table array\n");
		return -ENOMEM;
	}

	vm->page_directory_fence = NULL;

	r = amdgpu_bo_create(adev, pd_size, align, true,
			     AMDGPU_GEM_DOMAIN_VRAM,
			     AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
			     NULL, NULL, &vm->page_directory);
	if (r)
		return r;
	r = amdgpu_bo_reserve(vm->page_directory, false);
	if (r) {
		amdgpu_bo_unref(&vm->page_directory);
		vm->page_directory = NULL;
		return r;
	}
	r = amdgpu_vm_clear_bo(adev, vm->page_directory);
	amdgpu_bo_unreserve(vm->page_directory);
	if (r) {
		amdgpu_bo_unref(&vm->page_directory);
		vm->page_directory = NULL;
		return r;
	}

	return 0;
}

/**
 * amdgpu_vm_fini - tear down a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Tear down @vm (cayman+).
 * Unbind the VM and remove all bos from the vm bo list.
 */
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	struct amdgpu_bo_va_mapping *mapping, *tmp;
	int i;

	if (!RB_EMPTY_ROOT(&vm->va)) {
		dev_err(adev->dev, "still active bo inside vm\n");
	}
	rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, it.rb) {
		list_del(&mapping->list);
		interval_tree_remove(&mapping->it, &vm->va);
		kfree(mapping);
	}
	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
		list_del(&mapping->list);
		kfree(mapping);
	}

	for (i = 0; i < amdgpu_vm_num_pdes(adev); i++)
		amdgpu_bo_unref(&vm->page_tables[i].bo);
	kfree(vm->page_tables);

	amdgpu_bo_unref(&vm->page_directory);
	fence_put(vm->page_directory_fence);
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		unsigned id = vm->ids[i].id;

		atomic_long_cmpxchg(&adev->vm_manager.ids[id].owner,
				    (long)vm, 0);
		fence_put(vm->ids[i].flushed_updates);
	}
}

/**
 * amdgpu_vm_manager_fini - cleanup VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Cleanup the VM manager and free resources.
 */
void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
{
	unsigned i;

	for (i = 0; i < AMDGPU_NUM_VM; ++i)
		fence_put(adev->vm_manager.ids[i].active);
}