/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
31 static void amdgpu_job_timedout(struct amd_sched_job
*s_job
)
33 struct amdgpu_job
*job
= container_of(s_job
, struct amdgpu_job
, base
);
35 DRM_ERROR("ring %s timeout, last signaled seq=%u, last emitted seq=%u\n",
36 job
->base
.sched
->name
,
37 atomic_read(&job
->ring
->fence_drv
.last_seq
),
38 job
->ring
->fence_drv
.sync_seq
);
39 amdgpu_gpu_reset(job
->adev
);
42 int amdgpu_job_alloc(struct amdgpu_device
*adev
, unsigned num_ibs
,
43 struct amdgpu_job
**job
, struct amdgpu_vm
*vm
)
45 size_t size
= sizeof(struct amdgpu_job
);
50 size
+= sizeof(struct amdgpu_ib
) * num_ibs
;
52 *job
= kzalloc(size
, GFP_KERNEL
);
58 (*job
)->ibs
= (void *)&(*job
)[1];
59 (*job
)->num_ibs
= num_ibs
;
61 amdgpu_sync_create(&(*job
)->sync
);
66 int amdgpu_job_alloc_with_ib(struct amdgpu_device
*adev
, unsigned size
,
67 struct amdgpu_job
**job
)
71 r
= amdgpu_job_alloc(adev
, 1, job
, NULL
);
75 r
= amdgpu_ib_get(adev
, NULL
, size
, &(*job
)->ibs
[0]);
82 void amdgpu_job_free_resources(struct amdgpu_job
*job
)
87 /* use sched fence if available */
88 f
= job
->base
.s_fence
? &job
->base
.s_fence
->finished
: job
->fence
;
90 for (i
= 0; i
< job
->num_ibs
; ++i
)
91 amdgpu_ib_free(job
->adev
, &job
->ibs
[i
], f
);
94 void amdgpu_job_free_cb(struct amd_sched_job
*s_job
)
96 struct amdgpu_job
*job
= container_of(s_job
, struct amdgpu_job
, base
);
98 fence_put(job
->fence
);
99 amdgpu_sync_free(&job
->sync
);
103 void amdgpu_job_free(struct amdgpu_job
*job
)
105 amdgpu_job_free_resources(job
);
107 fence_put(job
->fence
);
108 amdgpu_sync_free(&job
->sync
);
112 int amdgpu_job_submit(struct amdgpu_job
*job
, struct amdgpu_ring
*ring
,
113 struct amd_sched_entity
*entity
, void *owner
,
122 r
= amd_sched_job_init(&job
->base
, &ring
->sched
, entity
, owner
);
127 job
->ctx
= entity
->fence_context
;
128 *f
= fence_get(&job
->base
.s_fence
->finished
);
129 amdgpu_job_free_resources(job
);
130 amd_sched_entity_push_job(&job
->base
);
135 static struct fence
*amdgpu_job_dependency(struct amd_sched_job
*sched_job
)
137 struct amdgpu_job
*job
= to_amdgpu_job(sched_job
);
138 struct amdgpu_vm
*vm
= job
->vm
;
140 struct fence
*fence
= amdgpu_sync_get_fence(&job
->sync
);
142 if (fence
== NULL
&& vm
&& !job
->vm_id
) {
143 struct amdgpu_ring
*ring
= job
->ring
;
146 r
= amdgpu_vm_grab_id(vm
, ring
, &job
->sync
,
147 &job
->base
.s_fence
->finished
,
150 DRM_ERROR("Error getting VM ID (%d)\n", r
);
152 fence
= amdgpu_sync_get_fence(&job
->sync
);
158 static struct fence
*amdgpu_job_run(struct amd_sched_job
*sched_job
)
160 struct fence
*fence
= NULL
;
161 struct amdgpu_job
*job
;
165 DRM_ERROR("job is null\n");
168 job
= to_amdgpu_job(sched_job
);
170 BUG_ON(amdgpu_sync_peek_fence(&job
->sync
, NULL
));
172 trace_amdgpu_sched_run_job(job
);
173 r
= amdgpu_ib_schedule(job
->ring
, job
->num_ibs
, job
->ibs
,
174 job
->sync
.last_vm_update
, job
, &fence
);
176 DRM_ERROR("Error scheduling IBs (%d)\n", r
);
178 /* if gpu reset, hw fence will be replaced here */
179 fence_put(job
->fence
);
180 job
->fence
= fence_get(fence
);
181 amdgpu_job_free_resources(job
);
185 const struct amd_sched_backend_ops amdgpu_sched_ops
= {
186 .dependency
= amdgpu_job_dependency
,
187 .run_job
= amdgpu_job_run
,
188 .timedout_job
= amdgpu_job_timedout
,
189 .free_job
= amdgpu_job_free_cb