/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
#include <linux/pagemap.h>
#include <drm/drmP.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
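
/**
 * amdgpu_cs_get_ring - look up the kernel ring for a CS request
 *
 * Translates the IP type, IP instance and ring index supplied by
 * userspace into the matching kernel ring and stores it in @out_ring.
 * Returns 0 on success or -EINVAL if the request is out of range.
 */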
int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
		       u32 ip_instance, u32 ring,
		       struct amdgpu_ring **out_ring)
{
	/* Right now all IPs have only one instance - multiple rings. */
	if (ip_instance != 0) {
		DRM_ERROR("invalid ip instance: %d\n", ip_instance);
		return -EINVAL;
	}

	switch (ip_type) {
	default:
		DRM_ERROR("unknown ip type: %d\n", ip_type);
		return -EINVAL;
	case AMDGPU_HW_IP_GFX:
		if (ring < adev->gfx.num_gfx_rings) {
			*out_ring = &adev->gfx.gfx_ring[ring];
		} else {
			DRM_ERROR("only %d gfx rings are supported now\n",
				  adev->gfx.num_gfx_rings);
			return -EINVAL;
		}
		break;
	case AMDGPU_HW_IP_COMPUTE:
		if (ring < adev->gfx.num_compute_rings) {
			*out_ring = &adev->gfx.compute_ring[ring];
		} else {
			DRM_ERROR("only %d compute rings are supported now\n",
				  adev->gfx.num_compute_rings);
			return -EINVAL;
		}
		break;
	case AMDGPU_HW_IP_DMA:
		if (ring < adev->sdma.num_instances) {
			*out_ring = &adev->sdma.instance[ring].ring;
		} else {
			DRM_ERROR("only %d SDMA rings are supported\n",
				  adev->sdma.num_instances);
			return -EINVAL;
		}
		break;
	case AMDGPU_HW_IP_UVD:
		*out_ring = &adev->uvd.ring;
		break;
	case AMDGPU_HW_IP_VCE:
		if (ring < 2) {
			*out_ring = &adev->vce.ring[ring];
		} else {
			DRM_ERROR("only two VCE rings are supported\n");
			return -EINVAL;
		}
		break;
	}
	return 0;
}
static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
				      struct drm_amdgpu_cs_chunk_fence *data,
				      uint32_t *offset)
{
	struct drm_gem_object *gobj;

	gobj = drm_gem_object_lookup(p->filp, data->handle);
	if (gobj == NULL)
		return -EINVAL;

	p->uf_entry.robj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
	p->uf_entry.priority = 0;
	p->uf_entry.tv.bo = &p->uf_entry.robj->tbo;
	p->uf_entry.tv.shared = true;
	p->uf_entry.user_pages = NULL;
	*offset = data->offset;

	drm_gem_object_unreference_unlocked(gobj);

	if (amdgpu_ttm_tt_get_usermm(p->uf_entry.robj->tbo.ttm)) {
		amdgpu_bo_unref(&p->uf_entry.robj);
		return -EINVAL;
	}

	return 0;
}
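
/**
 * amdgpu_cs_parser_init - copy the CS chunks in from userspace
 *
 * Copies the chunk array and the data of every chunk into kernel memory,
 * handles the fence chunk, counts the IB chunks and allocates the job
 * for this submission.
 */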
int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	union drm_amdgpu_cs *cs = data;
	uint64_t *chunk_array_user;
	uint64_t *chunk_array;
	unsigned size, num_ibs = 0;
	uint32_t uf_offset = 0;
	int i;
	int ret;

	if (cs->in.num_chunks == 0)
		return 0;

	chunk_array = kmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
	if (!chunk_array)
		return -ENOMEM;

	p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
	if (!p->ctx) {
		ret = -EINVAL;
		goto free_chunk;
	}

	/* get chunks */
	chunk_array_user = (uint64_t __user *)(unsigned long)(cs->in.chunks);
	if (copy_from_user(chunk_array, chunk_array_user,
			   sizeof(uint64_t)*cs->in.num_chunks)) {
		ret = -EFAULT;
		goto put_ctx;
	}

	p->nchunks = cs->in.num_chunks;
	p->chunks = kmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
				  GFP_KERNEL);
	if (!p->chunks) {
		ret = -ENOMEM;
		goto put_ctx;
	}

	for (i = 0; i < p->nchunks; i++) {
		struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
		struct drm_amdgpu_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = (void __user *)(unsigned long)chunk_array[i];
		if (copy_from_user(&user_chunk, chunk_ptr,
				   sizeof(struct drm_amdgpu_cs_chunk))) {
			ret = -EFAULT;
			i--;
			goto free_partial_kdata;
		}
		p->chunks[i].chunk_id = user_chunk.chunk_id;
		p->chunks[i].length_dw = user_chunk.length_dw;

		size = p->chunks[i].length_dw;
		cdata = (void __user *)(unsigned long)user_chunk.chunk_data;

		p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
		if (p->chunks[i].kdata == NULL) {
			ret = -ENOMEM;
			i--;
			goto free_partial_kdata;
		}
		size *= sizeof(uint32_t);
		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
			ret = -EFAULT;
			goto free_partial_kdata;
		}

		switch (p->chunks[i].chunk_id) {
		case AMDGPU_CHUNK_ID_IB:
			++num_ibs;
			break;

		case AMDGPU_CHUNK_ID_FENCE:
			size = sizeof(struct drm_amdgpu_cs_chunk_fence);
			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
				ret = -EINVAL;
				goto free_partial_kdata;
			}

			ret = amdgpu_cs_user_fence_chunk(p, p->chunks[i].kdata,
							 &uf_offset);
			if (ret)
				goto free_partial_kdata;

			break;

		case AMDGPU_CHUNK_ID_DEPENDENCIES:
			break;

		default:
			ret = -EINVAL;
			goto free_partial_kdata;
		}
	}

	ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job, vm);
	if (ret)
		goto free_all_kdata;

	if (p->uf_entry.robj) {
		p->job->uf_bo = amdgpu_bo_ref(p->uf_entry.robj);
		p->job->uf_offset = uf_offset;
	}

	kfree(chunk_array);
	return 0;

free_all_kdata:
	i = p->nchunks - 1;
free_partial_kdata:
	for (; i >= 0; i--)
		drm_free_large(p->chunks[i].kdata);
	kfree(p->chunks);
put_ctx:
	amdgpu_ctx_put(p->ctx);
free_chunk:
	kfree(chunk_array);

	return ret;
}
/* Returns how many bytes TTM can move per IB.
 */
static u64 amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev)
{
	u64 real_vram_size = adev->mc.real_vram_size;
	u64 vram_usage = atomic64_read(&adev->vram_usage);

	/* This function is based on the current VRAM usage.
	 *
	 * - If all of VRAM is free, allow relocating the number of bytes that
	 *   is equal to 1/4 of the size of VRAM for this IB.
	 *
	 * - If more than one half of VRAM is occupied, only allow relocating
	 *   1 MB of data for this IB.
	 *
	 * - From 0 to one half of used VRAM, the threshold decreases
	 *   linearly.
	 *
	 * Note: It's a threshold, not a limit. The threshold must be crossed
	 * for buffer relocations to stop, so any buffer of an arbitrary size
	 * can be moved as long as the threshold isn't crossed before
	 * the relocation takes place. We don't want to disable buffer
	 * relocations completely.
	 *
	 * The idea is that buffers should be placed in VRAM at creation time
	 * and TTM should only do a minimum number of relocations during
	 * command submission. In practice, you need to submit at least
	 * a dozen IBs to move all buffers to VRAM if they are in GTT.
	 *
	 * Also, things can get pretty crazy under memory pressure and actual
	 * VRAM usage can change a lot, so playing safe even at 50% does
	 * consistently increase performance.
	 */

	u64 half_vram = real_vram_size >> 1;
	u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
	u64 bytes_moved_threshold = half_free_vram >> 1;
	return max(bytes_moved_threshold, 1024*1024ull);
}
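
/* Illustrative numbers for the heuristic above: with 8 GB of VRAM and
 * 2 GB in use, half_vram is 4 GB, half_free_vram is 2 GB and the
 * threshold works out to 1 GB per IB; once usage reaches half of VRAM
 * the threshold bottoms out at the 1 MB minimum.
 */

/**
 * amdgpu_cs_list_validate - validate the BOs on a reservation list
 *
 * Binds user pages where necessary and validates every BO, preferring
 * the BO's preferred domains while the byte-moved threshold has not
 * been crossed and falling back to the allowed domains otherwise.
 */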
int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
			    struct list_head *validated)
{
	struct amdgpu_bo_list_entry *lobj;
	u64 initial_bytes_moved;
	int r;

	list_for_each_entry(lobj, validated, tv.head) {
		struct amdgpu_bo *bo = lobj->robj;
		bool binding_userptr = false;
		struct mm_struct *usermm;
		uint32_t domain;

		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
		if (usermm && usermm != current->mm)
			return -EPERM;

		/* Check if we have user pages and nobody bound the BO already */
		if (lobj->user_pages && bo->tbo.ttm->state != tt_bound) {
			size_t size = sizeof(struct page *);

			size *= bo->tbo.ttm->num_pages;
			memcpy(bo->tbo.ttm->pages, lobj->user_pages, size);
			binding_userptr = true;
		}

		/* Avoid moving this one if we have moved too many buffers
		 * for this IB already.
		 *
		 * Note that this allows moving at least one buffer of
		 * any size, because it doesn't take the current "bo"
		 * into account. We don't want to disallow buffer moves
		 * completely.
		 */
		if (p->bytes_moved <= p->bytes_moved_threshold)
			domain = bo->prefered_domains;
		else
			domain = bo->allowed_domains;

	retry:
		amdgpu_ttm_placement_from_domain(bo, domain);
		initial_bytes_moved = atomic64_read(&bo->adev->num_bytes_moved);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
		p->bytes_moved += atomic64_read(&bo->adev->num_bytes_moved) -
			initial_bytes_moved;

		if (unlikely(r)) {
			if (r != -ERESTARTSYS && domain != bo->allowed_domains) {
				domain = bo->allowed_domains;
				goto retry;
			}
			return r;
		}

		if (binding_userptr) {
			drm_free_large(lobj->user_pages);
			lobj->user_pages = NULL;
		}
	}

	return 0;
}
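
/**
 * amdgpu_cs_parser_bos - reserve and validate the buffers of a submission
 *
 * Builds the validation list from the BO list and the user fence entry,
 * reserves all buffers, fills the page arrays of userptr BOs (retrying
 * when they get invalidated) and validates everything against the
 * per-IB move threshold.
 */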
static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
				union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_bo_list_entry *e;
	struct list_head duplicates;
	bool need_mmap_lock = false;
	unsigned i, tries = 10;
	int r;

	INIT_LIST_HEAD(&p->validated);

	p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
	if (p->bo_list) {
		need_mmap_lock = p->bo_list->first_userptr !=
			p->bo_list->num_entries;
		amdgpu_bo_list_get_list(p->bo_list, &p->validated);
	}

	INIT_LIST_HEAD(&duplicates);
	amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);

	if (p->uf_entry.robj)
		list_add(&p->uf_entry.tv.head, &p->validated);

	if (need_mmap_lock)
		down_read(&current->mm->mmap_sem);

	while (1) {
		struct list_head need_pages;

		r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
					   &duplicates);
		if (unlikely(r != 0))
			goto error_free_pages;

		/* Without a BO list we don't have userptr BOs */
		if (!p->bo_list)
			break;

		INIT_LIST_HEAD(&need_pages);
		for (i = p->bo_list->first_userptr;
		     i < p->bo_list->num_entries; ++i) {

			e = &p->bo_list->array[i];

			if (amdgpu_ttm_tt_userptr_invalidated(e->robj->tbo.ttm,
				 &e->user_invalidated) && e->user_pages) {

				/* We acquired a page array, but somebody
				 * invalidated it. Free it and try again
				 */
				release_pages(e->user_pages,
					      e->robj->tbo.ttm->num_pages,
					      false);
				drm_free_large(e->user_pages);
				e->user_pages = NULL;
			}

			if (e->robj->tbo.ttm->state != tt_bound &&
			    !e->user_pages) {
				list_del(&e->tv.head);
				list_add(&e->tv.head, &need_pages);

				amdgpu_bo_unreserve(e->robj);
			}
		}

		if (list_empty(&need_pages))
			break;

		/* Unreserve everything again. */
		ttm_eu_backoff_reservation(&p->ticket, &p->validated);

		/* We tried too often, just abort */
		if (!--tries) {
			r = -EDEADLK;
			goto error_free_pages;
		}

		/* Fill the page arrays for all userptrs. */
		list_for_each_entry(e, &need_pages, tv.head) {
			struct ttm_tt *ttm = e->robj->tbo.ttm;

			e->user_pages = drm_calloc_large(ttm->num_pages,
							 sizeof(struct page *));
			if (!e->user_pages) {
				r = -ENOMEM;
				goto error_free_pages;
			}

			r = amdgpu_ttm_tt_get_user_pages(ttm, e->user_pages);
			if (r) {
				drm_free_large(e->user_pages);
				e->user_pages = NULL;
				goto error_free_pages;
			}
		}

		list_splice(&need_pages, &p->validated);
	}

	amdgpu_vm_get_pt_bos(&fpriv->vm, &duplicates);

	p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev);
	p->bytes_moved = 0;

	r = amdgpu_cs_list_validate(p, &duplicates);
	if (r)
		goto error_validate;

	r = amdgpu_cs_list_validate(p, &p->validated);
	if (r)
		goto error_validate;

	if (p->bo_list) {
		struct amdgpu_bo *gds = p->bo_list->gds_obj;
		struct amdgpu_bo *gws = p->bo_list->gws_obj;
		struct amdgpu_bo *oa = p->bo_list->oa_obj;
		struct amdgpu_vm *vm = &fpriv->vm;

		for (i = 0; i < p->bo_list->num_entries; i++) {
			struct amdgpu_bo *bo = p->bo_list->array[i].robj;

			p->bo_list->array[i].bo_va = amdgpu_vm_bo_find(vm, bo);
		}

		if (gds) {
			p->job->gds_base = amdgpu_bo_gpu_offset(gds);
			p->job->gds_size = amdgpu_bo_size(gds);
		}
		if (gws) {
			p->job->gws_base = amdgpu_bo_gpu_offset(gws);
			p->job->gws_size = amdgpu_bo_size(gws);
		}
		if (oa) {
			p->job->oa_base = amdgpu_bo_gpu_offset(oa);
			p->job->oa_size = amdgpu_bo_size(oa);
		}
	}

error_validate:
	if (r) {
		amdgpu_vm_move_pt_bos_in_lru(p->adev, &fpriv->vm);
		ttm_eu_backoff_reservation(&p->ticket, &p->validated);
	}

error_free_pages:

	if (need_mmap_lock)
		up_read(&current->mm->mmap_sem);

	if (p->bo_list) {
		for (i = p->bo_list->first_userptr;
		     i < p->bo_list->num_entries; ++i) {
			e = &p->bo_list->array[i];

			if (!e->user_pages)
				continue;

			release_pages(e->user_pages,
				      e->robj->tbo.ttm->num_pages,
				      false);
			drm_free_large(e->user_pages);
		}
	}

	return r;
}
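
/**
 * amdgpu_cs_sync_rings - make the job wait for all validated buffers
 *
 * Adds the fences of every validated BO's reservation object to the
 * job's sync object.
 */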
static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
{
	struct amdgpu_bo_list_entry *e;
	int r;

	list_for_each_entry(e, &p->validated, tv.head) {
		struct reservation_object *resv = e->robj->tbo.resv;
		r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp);

		if (r)
			return r;
	}
	return 0;
}
/**
 * cs_parser_fini() - clean parser states
 * @parser:	parser structure holding parsing context.
 * @error:	error number
 *
 * If error is set, then unvalidate the buffers, otherwise just free the
 * memory used by the parsing context.
 **/
static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	unsigned i;

	if (!error) {
		amdgpu_vm_move_pt_bos_in_lru(parser->adev, &fpriv->vm);

		ttm_eu_fence_buffer_objects(&parser->ticket,
					    &parser->validated,
					    parser->fence);
	} else if (backoff) {
		ttm_eu_backoff_reservation(&parser->ticket,
					   &parser->validated);
	}
	fence_put(parser->fence);

	if (parser->ctx)
		amdgpu_ctx_put(parser->ctx);
	if (parser->bo_list)
		amdgpu_bo_list_put(parser->bo_list);

	for (i = 0; i < parser->nchunks; i++)
		drm_free_large(parser->chunks[i].kdata);
	kfree(parser->chunks);
	if (parser->job)
		amdgpu_job_free(parser->job);
	amdgpu_bo_unref(&parser->uf_entry.robj);
}
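
/**
 * amdgpu_bo_vm_update_pte - bring the page tables up to date
 *
 * Updates the page directory, clears freed mappings, updates the BO VA
 * mappings of all buffers in the list and adds the resulting page table
 * fences to the job's sync object.
 */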
static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
				   struct amdgpu_vm *vm)
{
	struct amdgpu_device *adev = p->adev;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo *bo;
	int i, r;

	r = amdgpu_vm_update_page_directory(adev, vm);
	if (r)
		return r;

	r = amdgpu_sync_fence(adev, &p->job->sync, vm->page_directory_fence);
	if (r)
		return r;

	r = amdgpu_vm_clear_freed(adev, vm);
	if (r)
		return r;

	if (p->bo_list) {
		for (i = 0; i < p->bo_list->num_entries; i++) {
			struct fence *f;

			/* ignore duplicates */
			bo = p->bo_list->array[i].robj;
			if (!bo)
				continue;

			bo_va = p->bo_list->array[i].bo_va;
			if (bo_va == NULL)
				continue;

			r = amdgpu_vm_bo_update(adev, bo_va, &bo->tbo.mem);
			if (r)
				return r;

			f = bo_va->last_pt_update;
			r = amdgpu_sync_fence(adev, &p->job->sync, f);
			if (r)
				return r;
		}
	}

	r = amdgpu_vm_clear_invalids(adev, vm, &p->job->sync);

	if (amdgpu_vm_debug && p->bo_list) {
		/* Invalidate all BOs to test for userspace bugs */
		for (i = 0; i < p->bo_list->num_entries; i++) {
			/* ignore duplicates */
			bo = p->bo_list->array[i].robj;
			if (!bo)
				continue;

			amdgpu_vm_bo_invalidate(adev, bo);
		}
	}

	return r;
}
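
/**
 * amdgpu_cs_ib_vm_chunk - run IB parsing and VM updates for the job
 *
 * Invokes the ring's parse_cs callback on every IB (UVD/VCE VM
 * emulation only), then updates the page tables and syncs the job with
 * the validated buffers.
 */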
static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
				 struct amdgpu_cs_parser *p)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_ring *ring = p->job->ring;
	int i, r;

	/* Only for UVD/VCE VM emulation */
	if (ring->funcs->parse_cs) {
		for (i = 0; i < p->job->num_ibs; i++) {
			r = amdgpu_ring_parse_cs(ring, p, i);
			if (r)
				return r;
		}
	}

	r = amdgpu_bo_vm_update_pte(p, vm);
	if (!r)
		amdgpu_cs_sync_rings(p);

	return r;
}
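
/**
 * amdgpu_cs_handle_lockup - handle a deadlock detected during submission
 *
 * On -EDEADLK the GPU is reset and -EAGAIN is returned so userspace can
 * retry the submission; every other error code is passed through.
 */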
static int amdgpu_cs_handle_lockup(struct amdgpu_device *adev, int r)
{
	if (r == -EDEADLK) {
		r = amdgpu_gpu_reset(adev);
		if (!r)
			r = -EAGAIN;
	}
	return r;
}
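
/**
 * amdgpu_cs_ib_fill - fill the job's IBs from the IB chunks
 *
 * Resolves the target ring of every IB chunk, then either maps and
 * copies the IB contents (for rings that provide a parse_cs callback)
 * or references the IB directly through its GPU virtual address.
 */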
static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
			     struct amdgpu_cs_parser *parser)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	int i, j;
	int r;

	for (i = 0, j = 0; i < parser->nchunks && j < parser->job->num_ibs; i++) {
		struct amdgpu_cs_chunk *chunk;
		struct amdgpu_ib *ib;
		struct drm_amdgpu_cs_chunk_ib *chunk_ib;
		struct amdgpu_ring *ring;

		chunk = &parser->chunks[i];
		ib = &parser->job->ibs[j];
		chunk_ib = (struct drm_amdgpu_cs_chunk_ib *)chunk->kdata;

		if (chunk->chunk_id != AMDGPU_CHUNK_ID_IB)
			continue;

		r = amdgpu_cs_get_ring(adev, chunk_ib->ip_type,
				       chunk_ib->ip_instance, chunk_ib->ring,
				       &ring);
		if (r)
			return r;

		if (parser->job->ring && parser->job->ring != ring)
			return -EINVAL;

		parser->job->ring = ring;

		if (ring->funcs->parse_cs) {
			struct amdgpu_bo_va_mapping *m;
			struct amdgpu_bo *aobj = NULL;
			uint64_t offset;
			uint8_t *kptr;

			m = amdgpu_cs_find_mapping(parser, chunk_ib->va_start,
						   &aobj);
			if (!aobj) {
				DRM_ERROR("IB va_start is invalid\n");
				return -EINVAL;
			}

			if ((chunk_ib->va_start + chunk_ib->ib_bytes) >
			    (m->it.last + 1) * AMDGPU_GPU_PAGE_SIZE) {
				DRM_ERROR("IB va_start+ib_bytes is invalid\n");
				return -EINVAL;
			}

			/* the IB should be reserved at this point */
			r = amdgpu_bo_kmap(aobj, (void **)&kptr);
			if (r)
				return r;

			offset = ((uint64_t)m->it.start) * AMDGPU_GPU_PAGE_SIZE;
			kptr += chunk_ib->va_start - offset;

			r = amdgpu_ib_get(adev, NULL, chunk_ib->ib_bytes, ib);
			if (r) {
				DRM_ERROR("Failed to get ib !\n");
				return r;
			}

			memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
			amdgpu_bo_kunmap(aobj);
		} else {
			r = amdgpu_ib_get(adev, vm, 0, ib);
			if (r) {
				DRM_ERROR("Failed to get ib !\n");
				return r;
			}

			ib->gpu_addr = chunk_ib->va_start;
		}

		ib->length_dw = chunk_ib->ib_bytes / 4;
		ib->flags = chunk_ib->flags;
		j++;
	}

	/* UVD & VCE fw doesn't support user fences */
	if (parser->job->uf_bo && (
	    parser->job->ring->type == AMDGPU_RING_TYPE_UVD ||
	    parser->job->ring->type == AMDGPU_RING_TYPE_VCE))
		return -EINVAL;

	return 0;
}
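
/**
 * amdgpu_cs_dependencies - handle the dependency chunks
 *
 * Looks up the fence behind every dependency entry and adds it to the
 * job's sync object so the submission waits for it.
 */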
static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
				  struct amdgpu_cs_parser *p)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	int i, j, r;

	for (i = 0; i < p->nchunks; ++i) {
		struct drm_amdgpu_cs_chunk_dep *deps;
		struct amdgpu_cs_chunk *chunk;
		unsigned num_deps;

		chunk = &p->chunks[i];

		if (chunk->chunk_id != AMDGPU_CHUNK_ID_DEPENDENCIES)
			continue;

		deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
		num_deps = chunk->length_dw * 4 /
			sizeof(struct drm_amdgpu_cs_chunk_dep);

		for (j = 0; j < num_deps; ++j) {
			struct amdgpu_ring *ring;
			struct amdgpu_ctx *ctx;
			struct fence *fence;

			r = amdgpu_cs_get_ring(adev, deps[j].ip_type,
					       deps[j].ip_instance,
					       deps[j].ring, &ring);
			if (r)
				return r;

			ctx = amdgpu_ctx_get(fpriv, deps[j].ctx_id);
			if (ctx == NULL)
				return -EINVAL;

			fence = amdgpu_ctx_get_fence(ctx, ring,
						     deps[j].handle);
			if (IS_ERR(fence)) {
				r = PTR_ERR(fence);
				amdgpu_ctx_put(ctx);
				return r;

			} else if (fence) {
				r = amdgpu_sync_fence(adev, &p->job->sync,
						      fence);
				fence_put(fence);
				amdgpu_ctx_put(ctx);
				if (r)
					return r;
			}
		}
	}

	return 0;
}
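
/**
 * amdgpu_cs_submit - hand the prepared job over to the GPU scheduler
 *
 * Initializes the scheduler job, publishes the fence handle to userspace
 * through the ioctl output and pushes the job to the context's scheduler
 * entity.
 */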
static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
			    union drm_amdgpu_cs *cs)
{
	struct amdgpu_ring *ring = p->job->ring;
	struct amd_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
	struct fence *fence;
	struct amdgpu_job *job;
	int r;

	job = p->job;
	p->job = NULL;

	r = amd_sched_job_init(&job->base, &ring->sched,
			       entity, amdgpu_job_timeout_func,
			       amdgpu_job_free_func,
			       p->filp, &fence);
	if (r) {
		amdgpu_job_free(job);
		return r;
	}

	job->owner = p->filp;
	job->ctx = entity->fence_context;
	p->fence = fence_get(fence);
	cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, fence);
	job->uf_sequence = cs->out.handle;

	trace_amdgpu_cs_ioctl(job);
	amd_sched_entity_push_job(&job->base);

	return 0;
}
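
/**
 * amdgpu_cs_ioctl - submit command buffers to the GPU
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 *
 * Parses the chunks, reserves and validates the buffers, fills the IBs,
 * resolves dependencies and finally submits the job to the scheduler.
 */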
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = dev->dev_private;
	union drm_amdgpu_cs *cs = data;
	struct amdgpu_cs_parser parser = {};
	bool reserved_buffers = false;
	int i, r;

	if (!adev->accel_working)
		return -EBUSY;

	parser.adev = adev;
	parser.filp = filp;

	r = amdgpu_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		amdgpu_cs_parser_fini(&parser, r, false);
		r = amdgpu_cs_handle_lockup(adev, r);
		return r;
	}

	r = amdgpu_cs_parser_bos(&parser, data);
	if (r == -ENOMEM)
		DRM_ERROR("Not enough memory for command submission!\n");
	else if (r && r != -ERESTARTSYS)
		DRM_ERROR("Failed to process the buffer list %d!\n", r);
	else if (!r) {
		reserved_buffers = true;
		r = amdgpu_cs_ib_fill(adev, &parser);
	}

	if (!r) {
		r = amdgpu_cs_dependencies(adev, &parser);
		if (r)
			DRM_ERROR("Failed in the dependencies handling %d!\n", r);
	}

	if (r)
		goto out;

	for (i = 0; i < parser.job->num_ibs; i++)
		trace_amdgpu_cs(&parser, i);

	r = amdgpu_cs_ib_vm_chunk(adev, &parser);
	if (r)
		goto out;

	r = amdgpu_cs_submit(&parser, cs);

out:
	amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
	r = amdgpu_cs_handle_lockup(adev, r);

	return r;
}
/**
 * amdgpu_cs_wait_ioctl - wait for a command submission to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 *
 * Wait for the command submission identified by handle to finish.
 */
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp)
{
	union drm_amdgpu_wait_cs *wait = data;
	struct amdgpu_device *adev = dev->dev_private;
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
	struct amdgpu_ring *ring = NULL;
	struct amdgpu_ctx *ctx;
	struct fence *fence;
	long r;

	r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
			       wait->in.ring, &ring);
	if (r)
		return r;

	ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
	if (ctx == NULL)
		return -EINVAL;

	fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle);
	if (IS_ERR(fence))
		r = PTR_ERR(fence);
	else if (fence) {
		r = fence_wait_timeout(fence, true, timeout);
		fence_put(fence);
	} else
		r = 1;

	amdgpu_ctx_put(ctx);
	if (r < 0)
		return r;

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r == 0);

	return 0;
}
/**
 * amdgpu_cs_find_mapping() - find the bo_va mapping for a VM address
 *
 * @parser: command submission parser context
 * @addr: VM address
 * @bo: resulting BO of the mapping found
 *
 * Search the buffer objects in the command submission context for a certain
 * virtual memory address. Returns allocation structure when found, NULL
 * otherwise.
 */
struct amdgpu_bo_va_mapping *
amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
		       uint64_t addr, struct amdgpu_bo **bo)
{
	struct amdgpu_bo_va_mapping *mapping;
	unsigned i;

	if (!parser->bo_list)
		return NULL;

	addr /= AMDGPU_GPU_PAGE_SIZE;

	for (i = 0; i < parser->bo_list->num_entries; i++) {
		struct amdgpu_bo_list_entry *lobj;

		lobj = &parser->bo_list->array[i];
		if (!lobj->bo_va)
			continue;

		list_for_each_entry(mapping, &lobj->bo_va->valids, list) {
			if (mapping->it.start > addr ||
			    addr > mapping->it.last)
				continue;

			*bo = lobj->bo_va->bo;
			return mapping;
		}

		list_for_each_entry(mapping, &lobj->bo_va->invalids, list) {
			if (mapping->it.start > addr ||
			    addr > mapping->it.last)
				continue;

			*bo = lobj->bo_va->bo;
			return mapping;
		}
	}

	return NULL;
}