/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#ifndef _GPU_SCHEDULER_H_
#define _GPU_SCHEDULER_H_

#include <linux/kfifo.h>

/* Max number of hardware submissions tracked in a scheduler's active_hw_rq */
#define AMD_MAX_ACTIVE_HW_SUBMISSION		2
/* Depth of each context entity's software job queue (kfifo entries) */
#define AMD_MAX_JOB_ENTRY_PER_CONTEXT		16

/* Reserved context/process ids used for kernel-internal submissions */
#define AMD_KERNEL_CONTEXT_ID			0
#define AMD_KERNEL_PROCESS_ID			0

/* Timeout, in milliseconds, when waiting for the GPU to go idle */
#define AMD_GPU_WAIT_IDLE_TIMEOUT_IN_MS		3000

/* Forward declaration; the full definition appears later in this header */
struct amd_gpu_scheduler;
/**
 * A scheduler entity is a wrapper around a job queue or a group
 * of other entities. Entities take turns emitting jobs from their
 * job queues to corresponding hardware ring based on scheduling
 * policy.
 */
struct amd_sched_entity {
	struct list_head	list;		/* link on the owning run queue */
	struct amd_run_queue	*belongto_rq;	/* run queue this entity is attached to */
	struct amd_sched_entity	*parent;	/* enclosing group entity; presumably NULL at top level — TODO confirm */
};
/**
 * Run queue is a set of entities scheduling command submissions for
 * one specific ring. It implements the scheduling policy that selects
 * the next entity to emit commands from.
 */
struct amd_run_queue {
	struct amd_sched_entity	head;		/* anchor entity heading the list of entities */
	struct amd_sched_entity	*current_entity; /* entity most recently picked by the policy */
	/**
	 * Return 0 means this entity can be scheduled
	 * Return -1 means this entity cannot be scheduled for reasons,
	 * i.e, it is the head, or these is no job, etc
	 */
	int (*check_entity_status)(struct amd_sched_entity *entity);
};
/**
 * Context based scheduler entity, there can be multiple entities for
 * each context, and one entity per ring
 */
struct amd_context_entity {
	struct amd_sched_entity		generic_entity;	/* embedded base scheduler entity */

	/* the virtual_seq is unique per context per ring */
	atomic64_t			last_queued_v_seq;	/* presumably last seq queued by a client — TODO confirm */
	atomic64_t			last_emitted_v_seq;	/* presumably last seq emitted to the ring — TODO confirm */
	atomic64_t			last_signaled_v_seq;	/* presumably last seq completed/signaled — TODO confirm */

	/* the job_queue maintains the jobs submitted by clients */
	struct kfifo			job_queue;
	spinlock_t			queue_lock;	/* presumably protects job_queue — TODO confirm */
	struct amd_gpu_scheduler	*scheduler;	/* scheduler (one per hw ring) this entity feeds */
	wait_queue_head_t		wait_queue;	/* waiters on this entity's queue state — TODO confirm */
	wait_queue_head_t		wait_emit;	/* waiters on job emission (see amd_sched_wait_emit) */
};
/**
 * Define the backend operations called by the scheduler,
 * these functions should be implemented in driver side
 *
 * NOTE(review): the trailing "void *job" parameter of prepare_job and
 * run_job was truncated in this copy and has been restored to mirror
 * process_job; verify against the driver-side implementations.
 */
struct amd_sched_backend_ops {
	/* Called before a job is handed to the hardware; nonzero return
	 * presumably rejects the job — TODO confirm */
	int (*prepare_job)(struct amd_gpu_scheduler *sched,
			   struct amd_context_entity *c_entity,
			   void *job);
	/* Submit the job to the hardware ring */
	void (*run_job)(struct amd_gpu_scheduler *sched,
			struct amd_context_entity *c_entity,
			void *job);
	/* Post-completion processing of a finished job */
	void (*process_job)(struct amd_gpu_scheduler *sched, void *job);
};
/**
 * One scheduler is implemented for each hardware ring
 */
struct amd_gpu_scheduler {
	struct task_struct		*thread;	/* kernel thread driving this scheduler */
	struct amd_run_queue		sched_rq;	/* run queue for regular entities */
	struct amd_run_queue		kernel_rq;	/* run queue for kernel-context entities */
	struct kfifo			active_hw_rq;	/* jobs currently submitted to the hardware */
	struct amd_sched_backend_ops	*ops;		/* driver-supplied backend callbacks */

	uint32_t			granularity; /* in ms unit */
	uint64_t			last_handled_seq;	/* presumably last seq fully processed — TODO confirm */
	wait_queue_head_t		wait_queue;	/* woken to kick the scheduler thread — TODO confirm */
	struct amd_context_entity	*current_entity; /* entity currently being serviced */
	struct mutex			sched_lock;	/* serializes scheduling decisions — TODO confirm scope */
	spinlock_t			queue_lock;	/* presumably protects active_hw_rq — TODO confirm */
};
/*
 * Create and start a scheduler instance for one hardware ring.
 * @device: opaque driver device handle stored for the backend's use
 * @ops: driver-side backend callbacks (must outlive the scheduler)
 * @granularity: scheduling granularity in ms
 * @preemption: preemption setting — semantics not visible here, TODO confirm
 * Returns the new scheduler, or presumably NULL on failure — TODO confirm.
 *
 * NOTE(review): a parameter line appears to be missing between @ops and
 * @granularity in this copy (likely a ring index); verify against callers.
 */
struct amd_gpu_scheduler *amd_sched_create(void *device,
				struct amd_sched_backend_ops *ops,
				uint32_t granularity,
				uint32_t preemption);

/*
 * Stop the scheduler thread and tear down the instance.
 * Returns 0 on success — TODO confirm error semantics.
 */
int amd_sched_destroy(struct amd_gpu_scheduler *sched);
135 int amd_sched_push_job(struct amd_gpu_scheduler
*sched
,
136 struct amd_context_entity
*c_entity
,
139 int amd_sched_check_ts(struct amd_context_entity
*c_entity
, uint64_t seq
);
141 int amd_sched_wait_signal(struct amd_context_entity
*c_entity
,
142 uint64_t seq
, bool intr
, long timeout
);
143 int amd_sched_wait_emit(struct amd_context_entity
*c_entity
,
148 void amd_sched_isr(struct amd_gpu_scheduler
*sched
);
149 uint64_t amd_sched_get_handled_seq(struct amd_gpu_scheduler
*sched
);
/*
 * Tear down a context entity previously set up with
 * amd_context_entity_init(). Returns 0 on success — TODO confirm.
 */
int amd_context_entity_fini(struct amd_gpu_scheduler *sched,
			    struct amd_context_entity *entity);

/*
 * Initialize @entity for use on @sched.
 * @parent: parent scheduler entity to group under
 * @rq: run queue the entity will be attached to
 * @context_id: id of the owning context (AMD_KERNEL_CONTEXT_ID for
 *              kernel submissions)
 * Returns 0 on success — TODO confirm error semantics.
 */
int amd_context_entity_init(struct amd_gpu_scheduler *sched,
			    struct amd_context_entity *entity,
			    struct amd_sched_entity *parent,
			    struct amd_run_queue *rq,
			    uint32_t context_id);