/* drivers/gpu/drm/amd/scheduler/gpu_scheduler.c */

/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"

#define CREATE_TRACE_POINTS
#include "gpu_sched_trace.h"

static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);

struct kmem_cache *sched_fence_slab;
atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);

/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
	spin_lock_init(&rq->lock);
	INIT_LIST_HEAD(&rq->entities);
	rq->current_entity = NULL;
}

static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
				    struct amd_sched_entity *entity)
{
	if (!list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	list_add_tail(&entity->list, &rq->entities);
	spin_unlock(&rq->lock);
}

static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
				       struct amd_sched_entity *entity)
{
	if (list_empty(&entity->list))
		return;
	spin_lock(&rq->lock);
	list_del_init(&entity->list);
	if (rq->current_entity == entity)
		rq->current_entity = NULL;
	spin_unlock(&rq->lock);
}

/**
 * Select an entity which could provide a job to run
 *
 * @rq		The run queue to check.
 *
 * Round-robin: try to find a ready entity, starting right after the one
 * picked last time; returns NULL if none is found.
 */
static struct amd_sched_entity *
amd_sched_rq_select_entity(struct amd_sched_rq *rq)
{
	struct amd_sched_entity *entity;

	spin_lock(&rq->lock);

	entity = rq->current_entity;
	if (entity) {
		list_for_each_entry_continue(entity, &rq->entities, list) {
			if (amd_sched_entity_is_ready(entity)) {
				rq->current_entity = entity;
				spin_unlock(&rq->lock);
				return entity;
			}
		}
	}

	/* Nothing ready after current_entity, wrap around to the list head */
	list_for_each_entry(entity, &rq->entities, list) {
		if (amd_sched_entity_is_ready(entity)) {
			rq->current_entity = entity;
			spin_unlock(&rq->lock);
			return entity;
		}

		if (entity == rq->current_entity)
			break;
	}

	spin_unlock(&rq->lock);

	return NULL;
}

/**
 * Init a context entity used by the scheduler when submitting to a HW ring.
 *
 * @sched	The pointer to the scheduler
 * @entity	The pointer to a valid amd_sched_entity
 * @rq		The run queue this entity belongs to
 * @jobs	The max number of jobs in the job queue
 *
 * Returns 0 on success, negative error code on failure.
 */
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity,
			  struct amd_sched_rq *rq,
			  uint32_t jobs)
{
	int r;

	if (!(sched && entity && rq))
		return -EINVAL;

	memset(entity, 0, sizeof(struct amd_sched_entity));
	INIT_LIST_HEAD(&entity->list);
	entity->rq = rq;
	entity->sched = sched;

	spin_lock_init(&entity->queue_lock);
	r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
	if (r)
		return r;

	atomic_set(&entity->fence_seq, 0);
	entity->fence_context = fence_context_alloc(1);

	return 0;
}

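/*
 * Usage sketch (illustrative only; the "ring" and "ctx" structures and the
 * run queue index below are assumptions, not defined in this file):
 *
 *	r = amd_sched_entity_init(&ring->sched, &ctx->entity,
 *				  &ring->sched.sched_rq[0], 32);
 *	if (r)
 *		return r;
 *
 * The entity is only added to its run queue once the first job is pushed,
 * see amd_sched_entity_in() below.
 */
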
/**
 * Query if entity is initialized
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if entity is initialized, false otherwise
 */
static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
					    struct amd_sched_entity *entity)
{
	return entity->sched == sched &&
		entity->rq != NULL;
}

/**
 * Check if entity is idle
 *
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if the entity doesn't have any unscheduled jobs.
 */
static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
{
	rmb(); /* make sure we see the latest job_queue updates */
	if (kfifo_is_empty(&entity->job_queue))
		return true;

	return false;
}

/**
 * Check if entity is ready
 *
 * @entity	The pointer to a valid scheduler entity
 *
 * Return true if the entity could provide a job.
 */
static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
{
	if (kfifo_is_empty(&entity->job_queue))
		return false;

	if (ACCESS_ONCE(entity->dependency))
		return false;

	return true;
}

/**
 * Destroy a context entity
 *
 * @sched	Pointer to scheduler instance
 * @entity	The pointer to a valid scheduler entity
 *
 * Cleanup and free the allocated resources.
 */
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
			   struct amd_sched_entity *entity)
{
	struct amd_sched_rq *rq = entity->rq;

	if (!amd_sched_entity_is_initialized(sched, entity))
		return;

	/*
	 * The client will not queue more IBs during this fini; consume the
	 * existing queued ones before tearing the entity down.
	 */
	wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));

	amd_sched_rq_remove_entity(rq, entity);
	kfifo_free(&entity->job_queue);
}

/* Dependency fence signaled: clear it and check whether the scheduler has work */
static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_entity *entity =
		container_of(cb, struct amd_sched_entity, cb);
	entity->dependency = NULL;
	fence_put(f);
	amd_sched_wakeup(entity->sched);
}

/* Clear the dependency without waking up the scheduler */
static void amd_sched_entity_clear_dep(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_entity *entity =
		container_of(cb, struct amd_sched_entity, cb);
	entity->dependency = NULL;
	fence_put(f);
}

static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct fence *fence = entity->dependency;
	struct amd_sched_fence *s_fence;

	if (fence->context == entity->fence_context) {
		/* We can ignore fences from ourselves */
		fence_put(entity->dependency);
		return false;
	}

	s_fence = to_amd_sched_fence(fence);
	if (s_fence && s_fence->sched == sched) {
		/* Fence is from the same scheduler */
		if (test_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &fence->flags)) {
			/* Ignore it when it is already scheduled */
			fence_put(entity->dependency);
			return false;
		}

		/* Wait for fence to be scheduled */
		entity->cb.func = amd_sched_entity_clear_dep;
		list_add_tail(&entity->cb.node, &s_fence->scheduled_cb);
		return true;
	}

	if (!fence_add_callback(entity->dependency, &entity->cb,
				amd_sched_entity_wakeup))
		return true;

	fence_put(entity->dependency);
	return false;
}

static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity)
{
	struct amd_gpu_scheduler *sched = entity->sched;
	struct amd_sched_job *sched_job;

	/* Only peek here; the job stays queued until it is actually run */
	if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
		return NULL;

	while ((entity->dependency = sched->ops->dependency(sched_job)))
		if (amd_sched_entity_add_dependency_cb(entity))
			return NULL;

	return sched_job;
}

/**
 * Helper to submit a job to the job queue
 *
 * @sched_job	The job to submit
 *
 * Returns true if we could submit the job.
 */
static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
{
	struct amd_gpu_scheduler *sched = sched_job->sched;
	struct amd_sched_entity *entity = sched_job->s_entity;
	bool added, first = false;

	spin_lock(&entity->queue_lock);
	added = kfifo_in(&entity->job_queue, &sched_job,
			 sizeof(sched_job)) == sizeof(sched_job);

	if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job))
		first = true;

	spin_unlock(&entity->queue_lock);

	/* first job wakes up scheduler */
	if (first) {
		/* Add the entity to the run queue */
		amd_sched_rq_add_entity(entity->rq, entity);
		amd_sched_wakeup(sched);
	}
	return added;
}

/**
 * Submit a job to the job queue
 *
 * @sched_job	The job to submit
 *
 * Blocks until the job queue has room; returns once the job is queued.
 */
void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
	struct amd_sched_entity *entity = sched_job->s_entity;

	trace_amd_sched_job(sched_job);
	wait_event(entity->sched->job_scheduled,
		   amd_sched_entity_in(sched_job));
}

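/*
 * Usage sketch (illustrative only; the surrounding driver structures are
 * hypothetical, and amd_sched_fence_create() is assumed to be declared in
 * gpu_scheduler.h):
 *
 *	job->sched = &ring->sched;
 *	job->s_entity = &ctx->entity;
 *	job->s_fence = amd_sched_fence_create(&ctx->entity, owner);
 *	amd_sched_entity_push_job(job);	// may block until the queue has room
 */
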
/**
 * Return true if we can push more jobs to the hw.
 */
static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
{
	return atomic_read(&sched->hw_rq_count) <
		sched->hw_submission_limit;
}

/**
 * Wake up the scheduler when it is ready
 */
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
{
	if (amd_sched_ready(sched))
		wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * Select next entity to process
 */
static struct amd_sched_entity *
amd_sched_select_entity(struct amd_gpu_scheduler *sched)
{
	struct amd_sched_entity *entity;
	int i;

	if (!amd_sched_ready(sched))
		return NULL;

	/* Kernel run queue has higher priority than normal run queue */
	for (i = 0; i < AMD_SCHED_MAX_PRIORITY; i++) {
		entity = amd_sched_rq_select_entity(&sched->sched_rq[i]);
		if (entity)
			break;
	}

	return entity;
}

/* The HW fence of a job signaled: clean up and kick the scheduler */
static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
{
	struct amd_sched_fence *s_fence =
		container_of(cb, struct amd_sched_fence, cb);
	struct amd_gpu_scheduler *sched = s_fence->sched;
	unsigned long flags;

	atomic_dec(&sched->hw_rq_count);
	amd_sched_fence_signal(s_fence);
	if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
		cancel_delayed_work(&s_fence->dwork);
		spin_lock_irqsave(&sched->fence_list_lock, flags);
		list_del_init(&s_fence->list);
		spin_unlock_irqrestore(&sched->fence_list_lock, flags);
	}
	trace_amd_sched_process_job(s_fence);
	fence_put(&s_fence->base);
	wake_up_interruptible(&sched->wake_up_worker);
}

static void amd_sched_fence_work_func(struct work_struct *work)
{
	struct amd_sched_fence *s_fence =
		container_of(work, struct amd_sched_fence, dwork.work);
	struct amd_gpu_scheduler *sched = s_fence->sched;
	struct amd_sched_fence *entity, *tmp;
	unsigned long flags;

	DRM_ERROR("[%s] scheduler timed out!\n", sched->name);

	/* Clean all pending fences */
	spin_lock_irqsave(&sched->fence_list_lock, flags);
	list_for_each_entry_safe(entity, tmp, &sched->fence_list, list) {
		DRM_ERROR("  fence no %d\n", entity->base.seqno);
		cancel_delayed_work(&entity->dwork);
		list_del_init(&entity->list);
		fence_put(&entity->base);
	}
	spin_unlock_irqrestore(&sched->fence_list_lock, flags);
}

static int amd_sched_main(void *param)
{
	struct sched_param sparam = {.sched_priority = 1};
	struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
	int r, count;

	spin_lock_init(&sched->fence_list_lock);
	INIT_LIST_HEAD(&sched->fence_list);
	sched_setscheduler(current, SCHED_FIFO, &sparam);

	while (!kthread_should_stop()) {
		struct amd_sched_entity *entity;
		struct amd_sched_fence *s_fence;
		struct amd_sched_job *sched_job;
		struct fence *fence;
		unsigned long flags;

		wait_event_interruptible(sched->wake_up_worker,
			(entity = amd_sched_select_entity(sched)) ||
			kthread_should_stop());

		if (!entity)
			continue;

		sched_job = amd_sched_entity_pop_job(entity);
		if (!sched_job)
			continue;

		s_fence = sched_job->s_fence;

		/* Arm the optional timeout handler before touching the HW */
		if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
			INIT_DELAYED_WORK(&s_fence->dwork, amd_sched_fence_work_func);
			schedule_delayed_work(&s_fence->dwork, sched->timeout);
			spin_lock_irqsave(&sched->fence_list_lock, flags);
			list_add_tail(&s_fence->list, &sched->fence_list);
			spin_unlock_irqrestore(&sched->fence_list_lock, flags);
		}

		atomic_inc(&sched->hw_rq_count);
		fence = sched->ops->run_job(sched_job);
		amd_sched_fence_scheduled(s_fence);
		if (fence) {
			r = fence_add_callback(fence, &s_fence->cb,
					       amd_sched_process_job);
			if (r == -ENOENT)
				amd_sched_process_job(fence, &s_fence->cb);
			else if (r)
				DRM_ERROR("fence add callback failed (%d)\n", r);
			fence_put(fence);
		} else {
			DRM_ERROR("Failed to run job!\n");
			amd_sched_process_job(NULL, &s_fence->cb);
		}

		/* The job was submitted; now really remove it from the queue */
		count = kfifo_out(&entity->job_queue, &sched_job,
				  sizeof(sched_job));
		WARN_ON(count != sizeof(sched_job));
		wake_up(&sched->job_scheduled);
	}
	return 0;
}

/**
 * Init a gpu scheduler instance
 *
 * @sched		The pointer to the scheduler
 * @ops			The backend operations for this scheduler.
 * @hw_submission	The max number of in-flight hw submissions.
 * @timeout		Job timeout in jiffies, or MAX_SCHEDULE_TIMEOUT to
 *			disable timeout handling.
 * @name		Name used for debugging
 *
 * Return 0 on success, otherwise error code.
 */
int amd_sched_init(struct amd_gpu_scheduler *sched,
		   struct amd_sched_backend_ops *ops,
		   unsigned hw_submission, long timeout, const char *name)
{
	int i;

	sched->ops = ops;
	sched->hw_submission_limit = hw_submission;
	sched->name = name;
	sched->timeout = timeout;
	for (i = 0; i < AMD_SCHED_MAX_PRIORITY; i++)
		amd_sched_rq_init(&sched->sched_rq[i]);

	init_waitqueue_head(&sched->wake_up_worker);
	init_waitqueue_head(&sched->job_scheduled);
	atomic_set(&sched->hw_rq_count, 0);
	if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
		sched_fence_slab = kmem_cache_create(
			"amd_sched_fence", sizeof(struct amd_sched_fence), 0,
			SLAB_HWCACHE_ALIGN, NULL);
		if (!sched_fence_slab)
			return -ENOMEM;
	}

	/* Each scheduler will run on a separate kernel thread */
	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
	if (IS_ERR(sched->thread)) {
		DRM_ERROR("Failed to create scheduler for %s.\n", name);
		return PTR_ERR(sched->thread);
	}

	return 0;
}

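/*
 * Setup sketch (illustrative only; the my_* names and the "ring" structure
 * are hypothetical, not part of this file). The only two ops this file
 * calls are dependency() and run_job(), both taking the job and returning
 * a struct fence *:
 *
 *	static struct fence *my_dependency(struct amd_sched_job *sched_job)
 *	{
 *		return NULL;	// nothing extra to wait for
 *	}
 *
 *	static struct fence *my_run_job(struct amd_sched_job *sched_job)
 *	{
 *		return my_hw_submit(sched_job);	// signaled on completion
 *	}
 *
 *	static struct amd_sched_backend_ops my_ops = {
 *		.dependency = my_dependency,
 *		.run_job = my_run_job,
 *	};
 *
 *	r = amd_sched_init(&ring->sched, &my_ops, 2,
 *			   MAX_SCHEDULE_TIMEOUT, "my_ring");
 */
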
/**
 * Destroy a gpu scheduler
 *
 * @sched	The pointer to the scheduler
 */
void amd_sched_fini(struct amd_gpu_scheduler *sched)
{
	if (sched->thread)
		kthread_stop(sched->thread);
	if (atomic_dec_and_test(&sched_fence_slab_ref))
		kmem_cache_destroy(sched_fence_slab);
}