/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"

static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);

/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->current_entity = NULL;
}

static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
				    struct amd_sched_entity *entity)
{
	spin_lock(&rq->lock);
	list_add_tail(&entity->list, &rq->entities);
	spin_unlock(&rq->lock);
}

static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
				       struct amd_sched_entity *entity)
{
	spin_lock(&rq->lock);
	list_del_init(&entity->list);
	if (rq->current_entity == entity)
		rq->current_entity = NULL;
	spin_unlock(&rq->lock);
}

/**
 * Select the next job from a specified run queue with round-robin policy.
 * Return NULL if nothing is available.
 */
static struct amd_sched_job *
amd_sched_rq_select_job(struct amd_sched_rq *rq)
{
	struct amd_sched_entity *entity;
	struct amd_sched_job *job;

	spin_lock(&rq->lock);

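	/*
	 * Resume the round-robin scan right after the entity that got to
	 * run last, so all entities are served in turn.
	 */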
	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			job = amd_sched_entity_pop_job(entity);
			if (job) {
				rq->current_entity = entity;
				spin_unlock(&rq->lock);
				return job;
			}
		}
	}

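	/*
	 * Nothing runnable after current_entity: wrap around and scan from
	 * the head of the list, stopping once we reach current_entity again.
	 */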
	list_for_each_entry(entity, &rq->entities, list) {

		job = amd_sched_entity_pop_job(entity);
		if (job) {
			rq->current_entity = entity;
			spin_unlock(&rq->lock);
			return job;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}

/**
 * Init a context entity used by the scheduler when submitting to a HW ring.
 *
 * @sched The pointer to the scheduler
 * @entity The pointer to a valid amd_sched_entity
 * @rq The run queue this entity belongs to
 * @jobs The max number of jobs in the job queue
 *
 * Return 0 on success, negative error code on failure.
 */
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity,
			  struct amd_sched_rq *rq,
			  uint32_t jobs)
{
	if (!(sched && entity && rq))
		return -EINVAL;

	memset(entity, 0, sizeof(struct amd_sched_entity));
	entity->belongto_rq = rq;
	entity->scheduler = sched;
	entity->fence_context = fence_context_alloc(1);
	if (kfifo_alloc(&entity->job_queue,
			jobs * sizeof(void *),
			GFP_KERNEL))
		return -EINVAL;

	spin_lock_init(&entity->queue_lock);
	atomic_set(&entity->fence_seq, 0);

	/* Add the entity to the run queue */
	amd_sched_rq_add_entity(rq, entity);
	return 0;
}

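/*
 * Typical usage, a minimal sketch from a driver's point of view (ring->sched,
 * ctx->entity and amdgpu_sched_jobs are assumptions for illustration, not
 * part of this file):
 *
 *	r = amd_sched_entity_init(ring->sched, &ctx->entity,
 *				  &ring->sched->sched_rq, amdgpu_sched_jobs);
 *	if (r)
 *		return r;
 */
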
/**
 * Query if entity is initialized
 *
 * @sched Pointer to scheduler instance
 * @entity The pointer to a valid scheduler entity
 *
 * Return true if entity is initialized, false otherwise.
 */
static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
					    struct amd_sched_entity *entity)
{
	return entity->scheduler == sched &&
		entity->belongto_rq != NULL;
}

/**
 * Check if entity is idle
 *
 * @entity The pointer to a valid scheduler entity
 *
 * Return true if the entity doesn't have any unscheduled jobs.
 */
static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
{
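	/*
	 * Make sure we see the most recent job_queue state before testing
	 * for emptiness; this is a reading of the barrier's intent (pairing
	 * with the updates done by the consuming scheduler thread), not a
	 * documented contract.
	 */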
	rmb();
	if (kfifo_is_empty(&entity->job_queue))
		return true;

	return false;
}

/**
 * Destroy a context entity
 *
 * @sched Pointer to scheduler instance
 * @entity The pointer to a valid scheduler entity
 *
 * Cleanup and free the allocated resources.
 */
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
			   struct amd_sched_entity *entity)
{
	struct amd_sched_rq *rq = entity->belongto_rq;

	if (!amd_sched_entity_is_initialized(sched, entity))
		return;

	/*
	 * The client will not queue more IBs during this fini, consume
	 * existing queued IBs.
	 */
	wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));

	amd_sched_rq_remove_entity(rq, entity);
	kfifo_free(&entity->job_queue);
}

static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_entity *entity =
		container_of(cb, struct amd_sched_entity, cb);
	entity->dependency = NULL;
	fence_put(f);
	amd_sched_wakeup(entity->scheduler);
}

static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->scheduler;
	struct amd_sched_job *job;

	if (ACCESS_ONCE(entity->dependency))
		return NULL;

	if (!kfifo_out_peek(&entity->job_queue, &job, sizeof(job)))
		return NULL;

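	/*
	 * Walk the job's dependencies: if one is still unsignaled, register
	 * a wakeup callback on it and return NULL for now. A failing
	 * fence_add_callback() means that fence already signaled, so drop
	 * the reference and check the next dependency.
	 */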
	while ((entity->dependency = sched->ops->dependency(job))) {

		if (fence_add_callback(entity->dependency, &entity->cb,
				       amd_sched_entity_wakeup))
			fence_put(entity->dependency);
		else
			return NULL;
	}

	return job;
}

/**
 * Helper to submit a job to the job queue
 *
 * @job The pointer to the job required to submit
 *
 * Returns true if we could submit the job.
 */
static bool amd_sched_entity_in(struct amd_sched_job *job)
{
	struct amd_sched_entity *entity = job->s_entity;
	bool added, first = false;

	spin_lock(&entity->queue_lock);
	added = kfifo_in(&entity->job_queue, &job, sizeof(job)) == sizeof(job);

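	/*
	 * If the fifo now holds exactly one pointer, this job is the first
	 * one queued and the scheduler needs a kick.
	 */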
	if (added && kfifo_len(&entity->job_queue) == sizeof(job))
		first = true;

	spin_unlock(&entity->queue_lock);

	/* first job wakes up scheduler */
	if (first)
		amd_sched_wakeup(job->sched);

	return added;
}

/**
 * Submit a job to the job queue
 *
 * @sched_job The pointer to the job required to submit
 *
 * Returns 0 for success, negative error code otherwise.
 */
int amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
	struct amd_sched_entity *entity = sched_job->s_entity;
	struct amd_sched_fence *fence = amd_sched_fence_create(
		entity, sched_job->owner);

	if (!fence)
		return -ENOMEM;

	fence_get(&fence->base);
	sched_job->s_fence = fence;

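	/*
	 * Block the submitter until the job fits into the entity's fifo;
	 * the scheduler thread wakes job_scheduled each time it consumes a
	 * job, so this retries until amd_sched_entity_in() succeeds.
	 */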
	wait_event(entity->scheduler->job_scheduled,
		   amd_sched_entity_in(sched_job));

	return 0;
}

/**
 * Return true if we can push more jobs to the hw.
 */
static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}

/**
 * Wake up the scheduler when it is ready
 */
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
{
	if (amd_sched_ready(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * Select the next job to run
 */
static struct amd_sched_job *
amd_sched_select_job(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_job *job;

	if (!amd_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	job = amd_sched_rq_select_job(&sched->kernel_rq);
	if (job == NULL)
		job = amd_sched_rq_select_job(&sched->sched_rq);

	return job;
}

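/*
 * Completion callback, invoked once the hardware fence attached to a job
 * signals: it signals the job's scheduler fence, frees a slot on the HW
 * ring and kicks the worker thread so the next job can be picked up.
 */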
static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_job *sched_job =
		container_of(cb, struct amd_sched_job, cb);
	struct amd_gpu_scheduler *sched;

	sched = sched_job->sched;
	amd_sched_fence_signal(sched_job->s_fence);
	atomic_dec(&sched->hw_rq_count);
	fence_put(&sched_job->s_fence->base);
	sched->ops->process_job(sched_job);
	wake_up_interruptible(&sched->wake_up_worker);
}

static int amd_sched_main(void *param)
{
	struct sched_param sparam = {.sched_priority = 1};
	struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
	int r;

	sched_setscheduler(current, SCHED_FIFO, &sparam);

	while (!kthread_should_stop()) {
		struct amd_sched_entity *entity;
		struct amd_sched_job *job;
		struct fence *fence;

		wait_event_interruptible(sched->wake_up_worker,
			kthread_should_stop() ||
			(job = amd_sched_select_job(sched)));

		if (!job)
			continue;

		entity = job->s_entity;
		atomic_inc(&sched->hw_rq_count);
		fence = sched->ops->run_job(job);
		if (fence) {
			r = fence_add_callback(fence, &job->cb,
					       amd_sched_process_job);
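			/*
			 * -ENOENT from fence_add_callback() means the fence
			 * already signaled; run the completion handler
			 * directly instead.
			 */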
			if (r == -ENOENT)
				amd_sched_process_job(fence, &job->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n", r);
			fence_put(fence);
		}

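		/*
		 * The job is on the hardware now; drop it from the entity's
		 * fifo and wake anyone blocked in amd_sched_entity_push_job()
		 * or amd_sched_entity_fini().
		 */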
		kfifo_out(&entity->job_queue, &job, sizeof(job));
		wake_up(&sched->job_scheduled);
	}
	return 0;
}

/**
 * Create a gpu scheduler
 *
 * @ops The backend operations for this scheduler.
 * @ring The ring id for the scheduler.
 * @hw_submission Number of hw submissions to do.
 * @priv Driver private pointer stored in the scheduler.
 *
 * Return the pointer to the scheduler on success, otherwise return NULL.
 */
struct amd_gpu_scheduler *amd_sched_create(struct amd_sched_backend_ops *ops,
					   unsigned ring, unsigned hw_submission,
					   void *priv)
{
	struct amd_gpu_scheduler *sched;

	sched = kzalloc(sizeof(struct amd_gpu_scheduler), GFP_KERNEL);
	if (!sched)
		return NULL;

	sched->ops = ops;
	sched->ring_id = ring;
	sched->hw_submission_limit = hw_submission;
	sched->priv = priv;
	snprintf(sched->name, sizeof(sched->name), "amdgpu[%d]", ring);
	amd_sched_rq_init(&sched->sched_rq);
	amd_sched_rq_init(&sched->kernel_rq);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	atomic_set(&sched->hw_rq_count, 0);
	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		DRM_ERROR("Failed to create scheduler for id %d.\n", ring);
		kfree(sched);
		return NULL;
	}

	return sched;
}

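/*
 * Typical creation from a driver, a minimal sketch (amdgpu_sched_ops,
 * ring->idx, amdgpu_sched_hw_submission and adev are assumptions for
 * illustration, not part of this file):
 *
 *	ring->scheduler = amd_sched_create(&amdgpu_sched_ops, ring->idx,
 *					   amdgpu_sched_hw_submission, adev);
 *	if (!ring->scheduler)
 *		return -ENOMEM;
 */
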
/**
 * Destroy a gpu scheduler
 *
 * @sched The pointer to the scheduler
 *
 * Return 0 on success.
 */
int amd_sched_destroy(struct amd_gpu_scheduler *sched)
{
	kthread_stop(sched->thread);
	kfree(sched);
	return 0;
}