/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * GPUVM
 * GPUVM is similar to the legacy GART on older ASICs, however
 * rather than there being a single global GART table
 * for the entire GPU, there are multiple VM page tables active
 * at any given time.  The VM page tables can contain a mix of
 * VRAM pages and system memory pages, and system memory pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID.  When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer.  VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time;
 * SI supports 16.
 */

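/*
 * Worked sketch of the address math used throughout this file
 * (illustrative values only; amdgpu_vm_block_size is a module
 * parameter).  Addresses are handled in units of 4KB GPU pages, and
 * each page table holds AMDGPU_VM_PTE_COUNT = 1 << amdgpu_vm_block_size
 * entries.  With a block size of 9:
 *
 *	pfn     = gpu_va / AMDGPU_GPU_PAGE_SIZE;	   (page frame number)
 *	pde_idx = pfn >> 9;				   (page directory slot)
 *	pte_idx = pfn & ((1 << 9) - 1);			   (entry inside the table)
 *
 * so one page table spans 512 * 4KB = 2MB of address space.
 */
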
/**
 * amdgpu_vm_num_pdes - return the number of page directory entries
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate the number of page directory entries (cayman+).
 */
static unsigned amdgpu_vm_num_pdes(struct amdgpu_device *adev)
{
	return adev->vm_manager.max_pfn >> amdgpu_vm_block_size;
}

/**
 * amdgpu_vm_directory_size - returns the size of the page directory in bytes
 *
 * @adev: amdgpu_device pointer
 *
 * Calculate the size of the page directory in bytes (cayman+).
 */
static unsigned amdgpu_vm_directory_size(struct amdgpu_device *adev)
{
	return AMDGPU_GPU_PAGE_ALIGN(amdgpu_vm_num_pdes(adev) * 8);
}

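/*
 * Worked example (illustrative figures): with max_pfn = 1 << 20,
 * i.e. 4GB of 4KB pages, and amdgpu_vm_block_size = 9,
 * amdgpu_vm_num_pdes() returns 1 << 11 = 2048 entries, and at 8 bytes
 * per PDE the directory needs AMDGPU_GPU_PAGE_ALIGN(2048 * 8) = 16KB.
 */
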
/**
 * amdgpu_vm_get_bos - add the vm BOs to a validation list
 *
 * @vm: vm providing the BOs
 * @head: head of validation list
 *
 * Add the page directory to the list of BOs to
 * validate for command submission (cayman+).
 */
struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
					       struct amdgpu_vm *vm,
					       struct list_head *head)
{
	struct amdgpu_bo_list_entry *list;
	unsigned i, idx;

	mutex_lock(&vm->mutex);
	list = drm_malloc_ab(vm->max_pde_used + 2,
			     sizeof(struct amdgpu_bo_list_entry));
	if (!list) {
		mutex_unlock(&vm->mutex);
		return NULL;
	}

	/* add the vm page table to the list */
	list[0].robj = vm->page_directory;
	list[0].prefered_domains = AMDGPU_GEM_DOMAIN_VRAM;
	list[0].allowed_domains = AMDGPU_GEM_DOMAIN_VRAM;
	list[0].priority = 0;
	list[0].tv.bo = &vm->page_directory->tbo;
	list[0].tv.shared = true;
	list_add(&list[0].tv.head, head);

	for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {
		if (!vm->page_tables[i].bo)
			continue;

		list[idx].robj = vm->page_tables[i].bo;
		list[idx].prefered_domains = AMDGPU_GEM_DOMAIN_VRAM;
		list[idx].allowed_domains = AMDGPU_GEM_DOMAIN_VRAM;
		list[idx].priority = 0;
		list[idx].tv.bo = &list[idx].robj->tbo;
		list[idx].tv.shared = true;
		list_add(&list[idx++].tv.head, head);
	}
	mutex_unlock(&vm->mutex);

	return list;
}

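/*
 * Usage sketch (hypothetical caller, not taken from this file): the
 * returned array comes from drm_malloc_ab() and is expected to be
 * released with drm_free_large() once validation is done:
 *
 *	struct list_head head;
 *	struct amdgpu_bo_list_entry *entries;
 *
 *	INIT_LIST_HEAD(&head);
 *	entries = amdgpu_vm_get_bos(adev, vm, &head);
 *	... reserve and validate the BOs on head, submit, fence ...
 *	drm_free_large(entries);
 */
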
/**
 * amdgpu_vm_grab_id - allocate the next free VMID
 *
 * @ring: ring we want to submit job to
 * @vm: vm to allocate id for
 *
 * Allocate an id for the vm (cayman+).
 * Returns the fence we need to sync to (if any).
 *
 * Global and local mutex must be locked!
 */
struct amdgpu_fence *amdgpu_vm_grab_id(struct amdgpu_ring *ring,
				       struct amdgpu_vm *vm)
{
	struct amdgpu_fence *best[AMDGPU_MAX_RINGS] = {};
	struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
	struct amdgpu_device *adev = ring->adev;

	unsigned choices[2] = {};
	unsigned i;

	/* check if the id is still valid */
	if (vm_id->id && vm_id->last_id_use &&
	    vm_id->last_id_use == adev->vm_manager.active[vm_id->id])
		return NULL;

	/* we definitely need to flush */
	vm_id->pd_gpu_addr = ~0ll;

	/* skip over VMID 0, since it is the system VM */
	for (i = 1; i < adev->vm_manager.nvm; ++i) {
		struct amdgpu_fence *fence = adev->vm_manager.active[i];

		if (fence == NULL) {
			/* found a free one */
			vm_id->id = i;
			trace_amdgpu_vm_grab_id(i, ring->idx);
			return NULL;
		}

		if (amdgpu_fence_is_earlier(fence, best[fence->ring->idx])) {
			best[fence->ring->idx] = fence;
			choices[fence->ring == ring ? 0 : 1] = i;
		}
	}

	for (i = 0; i < 2; ++i) {
		if (choices[i]) {
			vm_id->id = choices[i];
			trace_amdgpu_vm_grab_id(choices[i], ring->idx);
			return adev->vm_manager.active[choices[i]];
		}
	}

	/* should never happen */
	BUG();
	return NULL;
}

/**
 * amdgpu_vm_flush - hardware flush the vm
 *
 * @ring: ring to use for flush
 * @vm: vm we want to flush
 * @updates: last vm update that we waited for
 *
 * Flush the vm (cayman+).
 *
 * Global and local mutex must be locked!
 */
void amdgpu_vm_flush(struct amdgpu_ring *ring,
		     struct amdgpu_vm *vm,
		     struct amdgpu_fence *updates)
{
	uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
	struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];

	if (pd_addr != vm_id->pd_gpu_addr || !vm_id->flushed_updates ||
	    amdgpu_fence_is_earlier(vm_id->flushed_updates, updates)) {

		trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id);
		amdgpu_fence_unref(&vm_id->flushed_updates);
		vm_id->flushed_updates = amdgpu_fence_ref(updates);
		vm_id->pd_gpu_addr = pd_addr;
		amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr);
	}
}

/**
 * amdgpu_vm_fence - remember fence for vm
 *
 * @adev: amdgpu_device pointer
 * @vm: vm we want to fence
 * @fence: fence to remember
 *
 * Fence the vm (cayman+).
 * Set the fence used to protect page table and id.
 *
 * Global and local mutex must be locked!
 */
void amdgpu_vm_fence(struct amdgpu_device *adev,
		     struct amdgpu_vm *vm,
		     struct amdgpu_fence *fence)
{
	unsigned ridx = fence->ring->idx;
	unsigned vm_id = vm->ids[ridx].id;

	amdgpu_fence_unref(&adev->vm_manager.active[vm_id]);
	adev->vm_manager.active[vm_id] = amdgpu_fence_ref(fence);

	amdgpu_fence_unref(&vm->ids[ridx].last_id_use);
	vm->ids[ridx].last_id_use = amdgpu_fence_ref(fence);
}

/**
 * amdgpu_vm_bo_find - find the bo_va for a specific vm & bo
 *
 * @vm: requested vm
 * @bo: requested buffer object
 *
 * Find @bo inside the requested vm (cayman+).
 * Search inside the @bos vm list for the requested vm
 * Returns the found bo_va or NULL if none is found
 *
 * Object has to be reserved!
 */
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
				       struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		if (bo_va->vm == vm) {
			return bo_va;
		}
	}
	return NULL;
}

/**
 * amdgpu_vm_update_pages - helper to call the right asic function
 *
 * @adev: amdgpu_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 * @gtt_flags: GTT hw access flags
 *
 * Traces the parameters and calls the right asic functions
 * to setup the page table using the DMA.
 */
static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
				   struct amdgpu_ib *ib,
				   uint64_t pe, uint64_t addr,
				   unsigned count, uint32_t incr,
				   uint32_t flags, uint32_t gtt_flags)
{
	trace_amdgpu_vm_set_page(pe, addr, count, incr, flags);

	if ((flags & AMDGPU_PTE_SYSTEM) && (flags == gtt_flags)) {
		uint64_t src = adev->gart.table_addr + (addr >> 12) * 8;
		amdgpu_vm_copy_pte(adev, ib, pe, src, count);

	} else if ((flags & AMDGPU_PTE_SYSTEM) || (count < 3)) {
		amdgpu_vm_write_pte(adev, ib, pe, addr,
				    count, incr, flags);

	} else {
		amdgpu_vm_set_pte_pde(adev, ib, pe, addr,
				      count, incr, flags);
	}
}

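/*
 * Illustrative dispatch examples (assumed flag combinations): a GTT
 * mapping whose PTE flags equal the GART flags takes the copy path,
 * since the PTEs can be copied out of the GART table unchanged; a
 * two-entry system update with filtered flags instead takes the
 * inline write path; a long, physically contiguous VRAM range takes
 * the set_pte_pde path.
 */
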
/**
 * amdgpu_vm_clear_bo - initially clear the page dir/table
 *
 * @adev: amdgpu_device pointer
 * @bo: bo to clear
 */
static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
			      struct amdgpu_bo *bo)
{
	struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
	struct amdgpu_ib ib;
	unsigned entries;
	uint64_t addr;
	int r;

	r = amdgpu_bo_reserve(bo, false);
	if (r)
		return r;

	r = reservation_object_reserve_shared(bo->tbo.resv);
	if (r)
		goto error_unreserve;

	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
	if (r)
		goto error_unreserve;

	addr = amdgpu_bo_gpu_offset(bo);
	entries = amdgpu_bo_size(bo) / 8;

	r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, &ib);
	if (r)
		goto error_unreserve;

	ib.length_dw = 0;

	amdgpu_vm_update_pages(adev, &ib, addr, 0, entries, 0, 0, 0);
	amdgpu_vm_pad_ib(adev, &ib);
	WARN_ON(ib.length_dw > 64);

	r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_VM);
	if (r)
		goto error_free;

	amdgpu_bo_fence(bo, ib.fence, true);

error_free:
	amdgpu_ib_free(adev, &ib);

error_unreserve:
	amdgpu_bo_unreserve(bo);
	return r;
}

/**
 * amdgpu_vm_map_gart - get the physical address of a gart page
 *
 * @adev: amdgpu_device pointer
 * @addr: the unmapped addr
 *
 * Look up the physical address of the page that the pte resolves
 * to (cayman+).
 * Returns the physical address of the page.
 */
uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr)
{
	uint64_t result;

	/* page table offset */
	result = adev->gart.pages_addr[addr >> PAGE_SHIFT];

	/* in case cpu page size != gpu page size */
	result |= addr & (~PAGE_MASK);

	return result;
}

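/*
 * Worked example (illustrative, assuming 4KB CPU pages): for
 * addr = 0x12345, pages_addr[0x12] supplies the physical page and
 * addr & ~PAGE_MASK = 0x345 restores the offset inside it, so the
 * OR of the two yields the full physical address.
 */
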
/**
 * amdgpu_vm_update_page_directory - make sure that page directory is valid
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Allocates new page tables if necessary
 * and updates the page directory (cayman+).
 * Returns 0 for success, error for failure.
 *
 * Global and local mutex must be locked!
 */
int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm)
{
	struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
	struct amdgpu_bo *pd = vm->page_directory;
	uint64_t pd_addr = amdgpu_bo_gpu_offset(pd);
	uint32_t incr = AMDGPU_VM_PTE_COUNT * 8;
	uint64_t last_pde = ~0, last_pt = ~0;
	unsigned count = 0, pt_idx, ndw;
	struct amdgpu_ib ib;
	int r;

	/* padding, etc. */
	ndw = 64;

	/* assume the worst case */
	ndw += vm->max_pde_used * 6;

	/* update too big for an IB */
	if (ndw > 0xfffff)
		return -ENOMEM;

	r = amdgpu_ib_get(ring, NULL, ndw * 4, &ib);
	if (r)
		return r;
	ib.length_dw = 0;

	/* walk over the address space and update the page directory */
	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
		struct amdgpu_bo *bo = vm->page_tables[pt_idx].bo;
		uint64_t pde, pt;

		if (bo == NULL)
			continue;

		pt = amdgpu_bo_gpu_offset(bo);
		if (vm->page_tables[pt_idx].addr == pt)
			continue;
		vm->page_tables[pt_idx].addr = pt;

		pde = pd_addr + pt_idx * 8;
		if (((last_pde + 8 * count) != pde) ||
		    ((last_pt + incr * count) != pt)) {

			if (count) {
				amdgpu_vm_update_pages(adev, &ib, last_pde,
						       last_pt, count, incr,
						       AMDGPU_PTE_VALID, 0);
			}

			count = 1;
			last_pde = pde;
			last_pt = pt;
		} else {
			++count;
		}
	}

	if (count)
		amdgpu_vm_update_pages(adev, &ib, last_pde, last_pt, count,
				       incr, AMDGPU_PTE_VALID, 0);

	if (ib.length_dw != 0) {
		amdgpu_vm_pad_ib(adev, &ib);
		amdgpu_sync_resv(adev, &ib.sync, pd->tbo.resv, AMDGPU_FENCE_OWNER_VM);
		WARN_ON(ib.length_dw > ndw);
		r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_VM);
		if (r) {
			amdgpu_ib_free(adev, &ib);
			return r;
		}
		amdgpu_bo_fence(pd, ib.fence, true);
	}
	amdgpu_ib_free(adev, &ib);

	return 0;
}

/**
 * amdgpu_vm_frag_ptes - add fragment information to PTEs
 *
 * @adev: amdgpu_device pointer
 * @ib: IB for the update
 * @pe_start: first PTE to handle
 * @pe_end: last PTE to handle
 * @addr: addr those PTEs should point to
 * @flags: hw mapping flags
 * @gtt_flags: GTT hw mapping flags
 *
 * Global and local mutex must be locked!
 */
static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev,
				struct amdgpu_ib *ib,
				uint64_t pe_start, uint64_t pe_end,
				uint64_t addr, uint32_t flags,
				uint32_t gtt_flags)
{
	/*
	 * The MC L1 TLB supports variable sized pages, based on a fragment
	 * field in the PTE. When this field is set to a non-zero value, page
	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
	 * flags are considered valid for all PTEs within the fragment range
	 * and corresponding mappings are assumed to be physically contiguous.
	 *
	 * The L1 TLB can store a single PTE for the whole fragment,
	 * significantly increasing the space available for translation
	 * caching. This leads to large improvements in throughput when the
	 * TLB is under pressure.
	 *
	 * The L2 TLB distributes small and large fragments into two
	 * asymmetric partitions. The large fragment cache is significantly
	 * larger. Thus, we try to use large fragments wherever possible.
	 * Userspace can support this by aligning virtual base address and
	 * allocation size to the fragment size.
	 */
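
	/*
	 * Worked example (illustrative): frag_align = 0x80 bytes of PTEs
	 * is 0x80 / 8 = 16 entries, i.e. 16 * 4KB = 64KB of address
	 * space, matching (1 << (12 + 4)) for a fragment field value of
	 * 4 (AMDGPU_PTE_FRAG_64KB).
	 */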
	/* SI and newer are optimized for 64KB */
	uint64_t frag_flags = AMDGPU_PTE_FRAG_64KB;
	uint64_t frag_align = 0x80;

	uint64_t frag_start = ALIGN(pe_start, frag_align);
	uint64_t frag_end = pe_end & ~(frag_align - 1);

	unsigned count;

	/* system pages are not contiguous */
	if ((flags & AMDGPU_PTE_SYSTEM) || !(flags & AMDGPU_PTE_VALID) ||
	    (frag_start >= frag_end)) {

		count = (pe_end - pe_start) / 8;
		amdgpu_vm_update_pages(adev, ib, pe_start, addr, count,
				       AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags);
		return;
	}

	/* handle the 4K area at the beginning */
	if (pe_start != frag_start) {
		count = (frag_start - pe_start) / 8;
		amdgpu_vm_update_pages(adev, ib, pe_start, addr, count,
				       AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags);
		addr += AMDGPU_GPU_PAGE_SIZE * count;
	}

	/* handle the area in the middle */
	count = (frag_end - frag_start) / 8;
	amdgpu_vm_update_pages(adev, ib, frag_start, addr, count,
			       AMDGPU_GPU_PAGE_SIZE, flags | frag_flags,
			       gtt_flags);

	/* handle the 4K area at the end */
	if (frag_end != pe_end) {
		addr += AMDGPU_GPU_PAGE_SIZE * count;
		count = (pe_end - frag_end) / 8;
		amdgpu_vm_update_pages(adev, ib, frag_end, addr, count,
				       AMDGPU_GPU_PAGE_SIZE, flags, gtt_flags);
	}
}

/**
 * amdgpu_vm_update_ptes - make sure that page tables are valid
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @start: start of GPU address range
 * @end: end of GPU address range
 * @dst: destination address to map to
 * @flags: mapping flags
 *
 * Update the page tables in the range @start - @end (cayman+).
 *
 * Global and local mutex must be locked!
 */
static int amdgpu_vm_update_ptes(struct amdgpu_device *adev,
				 struct amdgpu_vm *vm,
				 struct amdgpu_ib *ib,
				 uint64_t start, uint64_t end,
				 uint64_t dst, uint32_t flags,
				 uint32_t gtt_flags)
{
	uint64_t mask = AMDGPU_VM_PTE_COUNT - 1;
	uint64_t last_pte = ~0, last_dst = ~0;
	unsigned count = 0;
	uint64_t addr;

	/* walk over the address space and update the page tables */
	for (addr = start; addr < end; ) {
		uint64_t pt_idx = addr >> amdgpu_vm_block_size;
		struct amdgpu_bo *pt = vm->page_tables[pt_idx].bo;
		unsigned nptes;
		uint64_t pte;
		int r;

		amdgpu_sync_resv(adev, &ib->sync, pt->tbo.resv,
				 AMDGPU_FENCE_OWNER_VM);
		r = reservation_object_reserve_shared(pt->tbo.resv);
		if (r)
			return r;

		if ((addr & ~mask) == (end & ~mask))
			nptes = end - addr;
		else
			nptes = AMDGPU_VM_PTE_COUNT - (addr & mask);

		pte = amdgpu_bo_gpu_offset(pt);
		pte += (addr & mask) * 8;

		if ((last_pte + 8 * count) != pte) {

			if (count) {
				amdgpu_vm_frag_ptes(adev, ib, last_pte,
						    last_pte + 8 * count,
						    last_dst, flags,
						    gtt_flags);
			}

			count = nptes;
			last_pte = pte;
			last_dst = dst;
		} else {
			count += nptes;
		}

		addr += nptes;
		dst += nptes * AMDGPU_GPU_PAGE_SIZE;
	}

	if (count) {
		amdgpu_vm_frag_ptes(adev, ib, last_pte,
				    last_pte + 8 * count,
				    last_dst, flags, gtt_flags);
	}

	return 0;
}

/**
 * amdgpu_vm_fence_pts - fence page tables after an update
 *
 * @vm: requested vm
 * @start: start of GPU address range
 * @end: end of GPU address range
 * @fence: fence to use
 *
 * Fence the page tables in the range @start - @end (cayman+).
 *
 * Global and local mutex must be locked!
 */
static void amdgpu_vm_fence_pts(struct amdgpu_vm *vm,
				uint64_t start, uint64_t end,
				struct amdgpu_fence *fence)
{
	unsigned i;

	start >>= amdgpu_vm_block_size;
	end >>= amdgpu_vm_block_size;

	for (i = start; i <= end; ++i)
		amdgpu_bo_fence(vm->page_tables[i].bo, fence, true);
}

/**
 * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @mapping: mapped range and flags to use for the update
 * @addr: addr to set the area to
 * @gtt_flags: flags as they are used for GTT
 * @fence: optional resulting fence
 *
 * Fill in the page table entries for @mapping.
 * Returns 0 for success, -EINVAL for failure.
 *
 * Objects have to be reserved and mutex must be locked!
 */
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
				       struct amdgpu_vm *vm,
				       struct amdgpu_bo_va_mapping *mapping,
				       uint64_t addr, uint32_t gtt_flags,
				       struct amdgpu_fence **fence)
{
	struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
	unsigned nptes, ncmds, ndw;
	uint32_t flags = gtt_flags;
	struct amdgpu_ib ib;
	int r;

	/* normally, bo_va->flags only contains the READABLE and WRITEABLE
	 * bits, but just in case we filter the flags here first
	 */
	if (!(mapping->flags & AMDGPU_PTE_READABLE))
		flags &= ~AMDGPU_PTE_READABLE;
	if (!(mapping->flags & AMDGPU_PTE_WRITEABLE))
		flags &= ~AMDGPU_PTE_WRITEABLE;

	trace_amdgpu_vm_bo_update(mapping);

	nptes = mapping->it.last - mapping->it.start + 1;

	/*
	 * reserve space for one command every (1 << BLOCK_SIZE)
	 * entries or 2k dwords (whatever is smaller)
	 */
	ncmds = (nptes >> min(amdgpu_vm_block_size, 11)) + 1;

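	/*
	 * Worked example (illustrative): mapping 1MB (256 PTEs) with
	 * amdgpu_vm_block_size = 9 gives ncmds = (256 >> 9) + 1 = 1, so
	 * the write-data path below budgets roughly
	 * 64 + 1 * 4 + 256 * 2 = 580 dwords for the IB.
	 */
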
	/* padding, etc. */
	ndw = 64;

	if ((flags & AMDGPU_PTE_SYSTEM) && (flags == gtt_flags)) {
		/* only copy commands needed */
		ndw += ncmds * 7;

	} else if (flags & AMDGPU_PTE_SYSTEM) {
		/* header for write data commands */
		ndw += ncmds * 4;

		/* body of write data command */
		ndw += nptes * 2;

	} else {
		/* set page commands needed */
		ndw += ncmds * 10;

		/* two extra commands for begin/end of fragment */
		ndw += 2 * 10;
	}

	/* update too big for an IB */
	if (ndw > 0xfffff)
		return -ENOMEM;

	r = amdgpu_ib_get(ring, NULL, ndw * 4, &ib);
	if (r)
		return r;
	ib.length_dw = 0;

	if (!(flags & AMDGPU_PTE_VALID)) {
		unsigned i;

		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			struct amdgpu_fence *f = vm->ids[i].last_id_use;
			amdgpu_sync_fence(&ib.sync, f);
		}
	}

	r = amdgpu_vm_update_ptes(adev, vm, &ib, mapping->it.start,
				  mapping->it.last + 1, addr + mapping->offset,
				  flags, gtt_flags);
	if (r) {
		amdgpu_ib_free(adev, &ib);
		return r;
	}

	amdgpu_vm_pad_ib(adev, &ib);
	WARN_ON(ib.length_dw > ndw);

	r = amdgpu_ib_schedule(adev, 1, &ib, AMDGPU_FENCE_OWNER_VM);
	if (r) {
		amdgpu_ib_free(adev, &ib);
		return r;
	}
	amdgpu_vm_fence_pts(vm, mapping->it.start,
			    mapping->it.last + 1, ib.fence);
	if (fence) {
		amdgpu_fence_unref(fence);
		*fence = amdgpu_fence_ref(ib.fence);
	}
	amdgpu_ib_free(adev, &ib);

	return 0;
}

/**
 * amdgpu_vm_bo_update - update all BO mappings in the vm page table
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested BO and VM object
 * @mem: ttm mem
 *
 * Fill in the page table entries for @bo_va.
 * Returns 0 for success, -EINVAL for failure.
 *
 * Objects have to be reserved and mutex must be locked!
 */
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
			struct amdgpu_bo_va *bo_va,
			struct ttm_mem_reg *mem)
{
	struct amdgpu_vm *vm = bo_va->vm;
	struct amdgpu_bo_va_mapping *mapping;
	uint32_t flags;
	uint64_t addr;
	int r;

	if (mem) {
		addr = mem->start << PAGE_SHIFT;
		if (mem->mem_type != TTM_PL_TT)
			addr += adev->vm_manager.vram_base_offset;
	} else {
		addr = 0;
	}

	if (addr == bo_va->addr)
		return 0;

	flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem);

	list_for_each_entry(mapping, &bo_va->mappings, list) {
		r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, addr,
						flags, &bo_va->last_pt_update);
		if (r)
			return r;
	}

	bo_va->addr = addr;

	spin_lock(&vm->status_lock);
	list_del_init(&bo_va->vm_status);
	spin_unlock(&vm->status_lock);

	return 0;
}

/**
 * amdgpu_vm_clear_freed - clear freed BOs in the PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Make sure all freed BOs are cleared in the PT.
 * Returns 0 for success.
 *
 * PTs have to be reserved and mutex must be locked!
 */
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
			  struct amdgpu_vm *vm)
{
	struct amdgpu_bo_va_mapping *mapping;
	int r;

	while (!list_empty(&vm->freed)) {
		mapping = list_first_entry(&vm->freed,
			struct amdgpu_bo_va_mapping, list);
		list_del(&mapping->list);

		r = amdgpu_vm_bo_update_mapping(adev, vm, mapping, 0, 0, NULL);
		kfree(mapping);
		if (r)
			return r;
	}
	return 0;
}

/**
 * amdgpu_vm_clear_invalids - clear invalidated BOs in the PT
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @sync: sync object to add fences to
 *
 * Make sure all invalidated BOs are cleared in the PT.
 * Returns 0 for success.
 *
 * PTs have to be reserved and mutex must be locked!
 */
int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
			     struct amdgpu_vm *vm, struct amdgpu_sync *sync)
{
	struct amdgpu_bo_va *bo_va = NULL;
	int r = 0;

	spin_lock(&vm->status_lock);
	while (!list_empty(&vm->invalidated)) {
		bo_va = list_first_entry(&vm->invalidated,
			struct amdgpu_bo_va, vm_status);
		spin_unlock(&vm->status_lock);

		r = amdgpu_vm_bo_update(adev, bo_va, NULL);
		if (r)
			return r;

		spin_lock(&vm->status_lock);
	}
	spin_unlock(&vm->status_lock);

	if (bo_va)
		amdgpu_sync_fence(sync, bo_va->last_pt_update);

	return r;
}

/**
 * amdgpu_vm_bo_add - add a bo to a specific vm
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 * @bo: amdgpu buffer object
 *
 * Add @bo into the requested vm (cayman+).
 * Add @bo to the list of bos associated with the vm
 * Returns newly added bo_va or NULL for failure
 *
 * Object has to be reserved!
 */
struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
				      struct amdgpu_vm *vm,
				      struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	bo_va = kzalloc(sizeof(struct amdgpu_bo_va), GFP_KERNEL);
	if (bo_va == NULL) {
		return NULL;
	}
	bo_va->vm = vm;
	bo_va->bo = bo;
	bo_va->addr = 0;
	bo_va->ref_count = 1;
	INIT_LIST_HEAD(&bo_va->bo_list);
	INIT_LIST_HEAD(&bo_va->mappings);
	INIT_LIST_HEAD(&bo_va->vm_status);

	mutex_lock(&vm->mutex);
	list_add_tail(&bo_va->bo_list, &bo->va);
	mutex_unlock(&vm->mutex);

	return bo_va;
}

/**
 * amdgpu_vm_bo_map - map bo inside a vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to store the address
 * @saddr: where to map the BO
 * @offset: requested offset in the BO
 * @size: size of the mapping in bytes
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Add a mapping of the BO at the specified addr into the VM.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved and gets unreserved by this function!
 */
int amdgpu_vm_bo_map(struct amdgpu_device *adev,
		     struct amdgpu_bo_va *bo_va,
		     uint64_t saddr, uint64_t offset,
		     uint64_t size, uint32_t flags)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_vm *vm = bo_va->vm;
	struct interval_tree_node *it;
	unsigned last_pfn, pt_idx;
	uint64_t eaddr;
	int r;

	/* validate the parameters */
	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
	    size == 0 || size & AMDGPU_GPU_PAGE_MASK) {
		amdgpu_bo_unreserve(bo_va->bo);
		return -EINVAL;
	}

	/* make sure object fit at this offset */
	eaddr = saddr + size;
	if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo))) {
		amdgpu_bo_unreserve(bo_va->bo);
		return -EINVAL;
	}

	last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
	if (last_pfn > adev->vm_manager.max_pfn) {
		dev_err(adev->dev, "va above limit (0x%08X > 0x%08X)\n",
			last_pfn, adev->vm_manager.max_pfn);
		amdgpu_bo_unreserve(bo_va->bo);
		return -EINVAL;
	}

	mutex_lock(&vm->mutex);

	saddr /= AMDGPU_GPU_PAGE_SIZE;
	eaddr /= AMDGPU_GPU_PAGE_SIZE;

	it = interval_tree_iter_first(&vm->va, saddr, eaddr - 1);
	if (it) {
		struct amdgpu_bo_va_mapping *tmp;
		tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
		/* bo and tmp overlap, invalid addr */
		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
			"0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr,
			tmp->it.start, tmp->it.last + 1);
		amdgpu_bo_unreserve(bo_va->bo);
		r = -EINVAL;
		goto error_unlock;
	}

	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
	if (!mapping) {
		amdgpu_bo_unreserve(bo_va->bo);
		r = -ENOMEM;
		goto error_unlock;
	}

	INIT_LIST_HEAD(&mapping->list);
	mapping->it.start = saddr;
	mapping->it.last = eaddr - 1;
	mapping->offset = offset;
	mapping->flags = flags;

	list_add(&mapping->list, &bo_va->mappings);
	interval_tree_insert(&mapping->it, &vm->va);
	trace_amdgpu_vm_bo_map(bo_va, mapping);

	/* Make sure the page tables are allocated */
	saddr >>= amdgpu_vm_block_size;
	eaddr >>= amdgpu_vm_block_size;

	BUG_ON(eaddr >= amdgpu_vm_num_pdes(adev));

	if (eaddr > vm->max_pde_used)
		vm->max_pde_used = eaddr;

	amdgpu_bo_unreserve(bo_va->bo);

	/* walk over the address space and allocate the page tables */
	for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
		struct amdgpu_bo *pt;

		if (vm->page_tables[pt_idx].bo)
			continue;

		/* drop mutex to allocate and clear page table */
		mutex_unlock(&vm->mutex);

		r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
				     AMDGPU_GPU_PAGE_SIZE, true,
				     AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &pt);
		if (r)
			goto error_free;

		r = amdgpu_vm_clear_bo(adev, pt);
		if (r) {
			amdgpu_bo_unref(&pt);
			goto error_free;
		}

		/* acquire mutex again */
		mutex_lock(&vm->mutex);
		if (vm->page_tables[pt_idx].bo) {
			/* someone else allocated the pt in the meantime */
			mutex_unlock(&vm->mutex);
			amdgpu_bo_unref(&pt);
			mutex_lock(&vm->mutex);
			continue;
		}

		vm->page_tables[pt_idx].addr = 0;
		vm->page_tables[pt_idx].bo = pt;
	}

	mutex_unlock(&vm->mutex);
	return 0;

error_free:
	mutex_lock(&vm->mutex);
	list_del(&mapping->list);
	interval_tree_remove(&mapping->it, &vm->va);
	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
	kfree(mapping);

error_unlock:
	mutex_unlock(&vm->mutex);
	return r;
}

/**
 * amdgpu_vm_bo_unmap - remove bo mapping from vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: bo_va to remove the address from
 * @saddr: where the BO is mapped
 *
 * Remove a mapping of the BO at the specified addr from the VM.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved and gets unreserved by this function!
 */
int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
		       struct amdgpu_bo_va *bo_va,
		       uint64_t saddr)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_vm *vm = bo_va->vm;

	saddr /= AMDGPU_GPU_PAGE_SIZE;

	list_for_each_entry(mapping, &bo_va->mappings, list) {
		if (mapping->it.start == saddr)
			break;
	}

	if (&mapping->list == &bo_va->mappings) {
		amdgpu_bo_unreserve(bo_va->bo);
		return -ENOENT;
	}

	mutex_lock(&vm->mutex);
	list_del(&mapping->list);
	interval_tree_remove(&mapping->it, &vm->va);
	trace_amdgpu_vm_bo_unmap(bo_va, mapping);

	if (bo_va->addr) {
		/* clear the old address */
		list_add(&mapping->list, &vm->freed);
	} else {
		kfree(mapping);
	}
	mutex_unlock(&vm->mutex);
	amdgpu_bo_unreserve(bo_va->bo);

	return 0;
}

/**
 * amdgpu_vm_bo_rmv - remove a bo from a specific vm
 *
 * @adev: amdgpu_device pointer
 * @bo_va: requested bo_va
 *
 * Remove @bo_va->bo from the requested vm (cayman+).
 *
 * Object has to be reserved!
 */
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
		      struct amdgpu_bo_va *bo_va)
{
	struct amdgpu_bo_va_mapping *mapping, *next;
	struct amdgpu_vm *vm = bo_va->vm;

	list_del(&bo_va->bo_list);

	mutex_lock(&vm->mutex);

	spin_lock(&vm->status_lock);
	list_del(&bo_va->vm_status);
	spin_unlock(&vm->status_lock);

	list_for_each_entry_safe(mapping, next, &bo_va->mappings, list) {
		list_del(&mapping->list);
		interval_tree_remove(&mapping->it, &vm->va);
		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
		if (bo_va->addr)
			list_add(&mapping->list, &vm->freed);
		else
			kfree(mapping);
	}
	amdgpu_fence_unref(&bo_va->last_pt_update);
	kfree(bo_va);

	mutex_unlock(&vm->mutex);
}

/**
 * amdgpu_vm_bo_invalidate - mark the bo as invalid
 *
 * @adev: amdgpu_device pointer
 * @bo: amdgpu buffer object
 *
 * Mark @bo as invalid (cayman+).
 */
void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
			     struct amdgpu_bo *bo)
{
	struct amdgpu_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		if (bo_va->addr) {
			spin_lock(&bo_va->vm->status_lock);
			list_del(&bo_va->vm_status);
			list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
			spin_unlock(&bo_va->vm->status_lock);
		}
	}
}

/**
 * amdgpu_vm_init - initialize a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Init @vm fields (cayman+).
 */
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
		AMDGPU_VM_PTE_COUNT * 8);
	unsigned pd_size, pd_entries, pts_size;
	int i, r;

	vm->va = RB_ROOT;
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		vm->ids[i].id = 0;
		vm->ids[i].flushed_updates = NULL;
		vm->ids[i].last_id_use = NULL;
	}
	mutex_init(&vm->mutex);
	spin_lock_init(&vm->status_lock);
	INIT_LIST_HEAD(&vm->invalidated);
	INIT_LIST_HEAD(&vm->freed);

	pd_size = amdgpu_vm_directory_size(adev);
	pd_entries = amdgpu_vm_num_pdes(adev);

	/* allocate page table array */
	pts_size = pd_entries * sizeof(struct amdgpu_vm_pt);
	vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
	if (vm->page_tables == NULL) {
		DRM_ERROR("Cannot allocate memory for page table array\n");
		return -ENOMEM;
	}

	r = amdgpu_bo_create(adev, pd_size, align, true,
			     AMDGPU_GEM_DOMAIN_VRAM, 0,
			     NULL, &vm->page_directory);
	if (r)
		return r;

	r = amdgpu_vm_clear_bo(adev, vm->page_directory);
	if (r) {
		amdgpu_bo_unref(&vm->page_directory);
		vm->page_directory = NULL;
		return r;
	}

	return 0;
}

/**
 * amdgpu_vm_fini - tear down a vm instance
 *
 * @adev: amdgpu_device pointer
 * @vm: requested vm
 *
 * Tear down @vm (cayman+).
 * Unbind the VM and remove all bos from the vm bo list
 */
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	struct amdgpu_bo_va_mapping *mapping, *tmp;
	int i;

	if (!RB_EMPTY_ROOT(&vm->va)) {
		dev_err(adev->dev, "still active bo inside vm\n");
	}
	rbtree_postorder_for_each_entry_safe(mapping, tmp, &vm->va, it.rb) {
		list_del(&mapping->list);
		interval_tree_remove(&mapping->it, &vm->va);
		kfree(mapping);
	}
	list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
		list_del(&mapping->list);
		kfree(mapping);
	}

	for (i = 0; i < amdgpu_vm_num_pdes(adev); i++)
		amdgpu_bo_unref(&vm->page_tables[i].bo);
	kfree(vm->page_tables);

	amdgpu_bo_unref(&vm->page_directory);

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		amdgpu_fence_unref(&vm->ids[i].flushed_updates);
		amdgpu_fence_unref(&vm->ids[i].last_id_use);
	}

	mutex_destroy(&vm->mutex);
}