drm/amd: add basic scheduling framework
drivers/gpu/drm/amd/scheduler/gpu_scheduler.h (from deliverable/linux.git)
1 /*
2 * Copyright 2015 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 *
22 */
23
24 #ifndef _GPU_SCHEDULER_H_
25 #define _GPU_SCHEDULER_H_
26
27 #include <linux/kfifo.h>
28
/* Max number of jobs in flight on the hardware ring at once. */
#define AMD_MAX_ACTIVE_HW_SUBMISSION		2
/* Capacity of each context entity's job_queue kfifo -- TODO confirm units (entries vs bytes). */
#define AMD_MAX_JOB_ENTRY_PER_CONTEXT		16

/* Reserved context/process ids for kernel-internal submissions. */
#define AMD_KERNEL_CONTEXT_ID			0
#define AMD_KERNEL_PROCESS_ID			0

/* Timeout, in milliseconds, when waiting for the GPU to go idle. */
#define AMD_GPU_WAIT_IDLE_TIMEOUT_IN_MS		3000

/* Forward declarations; full definitions below. */
struct amd_gpu_scheduler;
struct amd_run_queue;
39
/**
 * A scheduler entity is a wrapper around a job queue or a group
 * of other entities. Entities take turns emitting jobs from their
 * job queues to corresponding hardware ring based on scheduling
 * policy.
 *
 * NOTE(review): this header uses struct list_head, mutex, spinlock_t,
 * atomic_t and wait_queue_head_t but only includes <linux/kfifo.h>;
 * it relies on indirect inclusion -- consider including the headers
 * for these types explicitly.
 */
struct amd_sched_entity {
	struct list_head		list;		/* link into the owning run queue's entity list */
	struct amd_run_queue		*belongto_rq;	/* run queue this entity is scheduled on */
	struct amd_sched_entity		*parent;	/* enclosing entity group; presumably NULL at top level -- TODO confirm */
};
51
/**
 * Run queue is a set of entities scheduling command submissions for
 * one specific ring. It implements the scheduling policy that selects
 * the next entity to emit commands from.
 */
struct amd_run_queue {
	struct mutex		lock;		/* presumably serializes entity list changes -- TODO confirm */
	atomic_t		nr_entity;	/* number of entities on this run queue */
	struct amd_sched_entity	head;		/* sentinel head of the entity list */
	struct amd_sched_entity	*current_entity;/* entity most recently picked by the policy */
	/**
	 * Return 0 means this entity can be scheduled
	 * Return -1 means this entity cannot be scheduled for reasons,
	 * i.e, it is the head, or there is no job, etc
	 */
	int (*check_entity_status)(struct amd_sched_entity *entity);
};
69
/**
 * Context based scheduler entity, there can be multiple entities for
 * each context, and one entity per ring
 */
struct amd_context_entity {
	struct amd_sched_entity	generic_entity;		/* embedded base entity (must stay first for container_of-style use -- TODO confirm) */
	spinlock_t		lock;
	/* the virtual_seq is unique per context per ring */
	atomic64_t		last_queued_v_seq;	/* newest seq assigned to a queued job -- name-based, TODO confirm */
	atomic64_t		last_emitted_v_seq;	/* newest seq emitted to the hardware ring (see amd_sched_wait_emit) */
	atomic64_t		last_signaled_v_seq;	/* newest seq signaled complete (see amd_sched_wait_signal) */
	pid_t			tgid;			/* thread group id of the owning process */
	uint32_t		context_id;		/* AMD_KERNEL_CONTEXT_ID for kernel submissions */
	/* the job_queue maintains the jobs submitted by clients */
	struct kfifo		job_queue;
	spinlock_t		queue_lock;		/* presumably protects job_queue -- TODO confirm */
	struct amd_gpu_scheduler	*scheduler;	/* scheduler this entity submits to */
	wait_queue_head_t	wait_queue;
	wait_queue_head_t	wait_emit;		/* woken when a job's seq is emitted -- TODO confirm */
	bool			is_pending;
};
91
/**
 * Define the backend operations called by the scheduler,
 * these functions should be implemented in driver side
 */
struct amd_sched_backend_ops {
	/* Prepare @job for submission on behalf of @c_entity.
	 * Non-zero return presumably indicates failure -- error
	 * convention is defined by the driver, TODO confirm. */
	int (*prepare_job)(struct amd_gpu_scheduler *sched,
			   struct amd_context_entity *c_entity,
			   void *job);
	/* Submit @job to the hardware ring. */
	void (*run_job)(struct amd_gpu_scheduler *sched,
			struct amd_context_entity *c_entity,
			void *job);
	/* Post-processing for @job -- presumably called on completion,
	 * TODO confirm against the scheduler implementation. */
	void (*process_job)(struct amd_gpu_scheduler *sched, void *job);
};
105
/**
 * One scheduler is implemented for each hardware ring
 */
struct amd_gpu_scheduler {
	void				*device;	/* opaque driver/device handle passed to amd_sched_create() */
	struct task_struct		*thread;	/* kernel thread running the scheduler loop */
	struct amd_run_queue		sched_rq;	/* run queue for regular entities */
	struct amd_run_queue		kernel_rq;	/* separate run queue for kernel entities */
	struct kfifo			active_hw_rq;	/* jobs currently on hardware; bounded by AMD_MAX_ACTIVE_HW_SUBMISSION -- TODO confirm */
	struct amd_sched_backend_ops	*ops;		/* driver-supplied backend callbacks */
	uint32_t			ring_id;	/* hardware ring this scheduler drives */
	uint32_t			granularity;	/* in ms unit */
	uint32_t			preemption;	/* preemption setting; semantics driver-defined -- TODO confirm */
	uint64_t			last_handled_seq;	/* exposed via amd_sched_get_handled_seq() */
	wait_queue_head_t		wait_queue;
	struct amd_context_entity	*current_entity;
	struct mutex			sched_lock;
	spinlock_t			queue_lock;
};
125
126
/* Create and start a scheduler for hardware ring @ring.
 * @granularity is in ms (see struct amd_gpu_scheduler).
 * Presumably returns NULL on failure -- TODO confirm. */
struct amd_gpu_scheduler *amd_sched_create(void *device,
				struct amd_sched_backend_ops *ops,
				uint32_t ring,
				uint32_t granularity,
				uint32_t preemption);

/* Stop the scheduler thread and tear down @sched. */
int amd_sched_destroy(struct amd_gpu_scheduler *sched);

/* Queue @job on @c_entity's job_queue for later submission. */
int amd_sched_push_job(struct amd_gpu_scheduler *sched,
		       struct amd_context_entity *c_entity,
		       void *job);

/* Check a virtual sequence number @seq against @c_entity's progress
 * counters -- exact predicate defined in the implementation, TODO confirm. */
int amd_sched_check_ts(struct amd_context_entity *c_entity, uint64_t seq);

/* Wait until virtual seq @seq is signaled on @c_entity.
 * @intr selects interruptible waiting; @timeout bounds the wait. */
int amd_sched_wait_signal(struct amd_context_entity *c_entity,
			  uint64_t seq, bool intr, long timeout);
/* Wait until virtual seq @seq has been emitted to the hardware ring. */
int amd_sched_wait_emit(struct amd_context_entity *c_entity,
			uint64_t seq,
			bool intr,
			long timeout);

/* Interrupt-context hook notifying @sched of hardware progress. */
void amd_sched_isr(struct amd_gpu_scheduler *sched);
/* Return the last sequence number the scheduler has handled. */
uint64_t amd_sched_get_handled_seq(struct amd_gpu_scheduler *sched);

/* Tear down a context entity previously set up with
 * amd_context_entity_init(). */
int amd_context_entity_fini(struct amd_gpu_scheduler *sched,
			    struct amd_context_entity *entity);

/* Initialize @entity, attach it to run queue @rq under @parent, and
 * associate it with @context_id (AMD_KERNEL_CONTEXT_ID for kernel use). */
int amd_context_entity_init(struct amd_gpu_scheduler *sched,
			    struct amd_context_entity *entity,
			    struct amd_sched_entity *parent,
			    struct amd_run_queue *rq,
			    uint32_t context_id);
160 #endif