/*
 * drm/amdgpu: Add H/W agnostic amdgpu <--> amdkfd interface
 * (deliverable/linux.git: drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c,
 * commit 130e0371)
 */
1/*
2 * Copyright 2014 Advanced Micro Devices, Inc.
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23#include "amdgpu_amdkfd.h"
24#include "amdgpu_family.h"
25#include <drm/drmP.h>
26#include "amdgpu.h"
27#include <linux/module.h>
28
/* Call table exported by amdgpu to amdkfd (filled in per-ASIC elsewhere). */
const struct kfd2kgd_calls *kfd2kgd;
/* Call table obtained from amdkfd via kgd2kfd_init(); NULL when KFD absent. */
const struct kgd2kfd_calls *kgd2kfd;
/* NOTE(review): this file-scope pointer is shadowed by same-named locals in
 * every function below and is never read here — it looks dead; confirm no
 * other translation unit references it before removing. */
bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
32
33bool amdgpu_amdkfd_init(void)
34{
35#if defined(CONFIG_HSA_AMD_MODULE)
36 bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
37
38 kgd2kfd_init_p = symbol_request(kgd2kfd_init);
39
40 if (kgd2kfd_init_p == NULL)
41 return false;
42#endif
43 return true;
44}
45
/*
 * amdgpu_amdkfd_load_interface - resolve and initialize the kgd2kfd
 * interface exported by amdkfd for @rdev.
 *
 * On success kgd2kfd points at amdkfd's call table and true is returned;
 * on failure both interface pointers are cleared and false is returned.
 */
bool amdgpu_amdkfd_load_interface(struct amdgpu_device *rdev)
{
#if defined(CONFIG_HSA_AMD_MODULE)
	bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
#endif

	/*
	 * No ASIC is enabled yet: CHIP_KAVERI deliberately falls through to
	 * default, so this currently returns false for every device and the
	 * plumbing below stays unreachable until an ASIC case gets a "break".
	 */
	switch (rdev->asic_type) {
	case CHIP_KAVERI: /* fallthrough */
	default:
		return false;
	}

#if defined(CONFIG_HSA_AMD_MODULE)
	/* amdkfd built as a module: take a reference on its init symbol. */
	kgd2kfd_init_p = symbol_request(kgd2kfd_init);

	if (kgd2kfd_init_p == NULL) {
		kfd2kgd = NULL;
		return false;
	}

	if (!kgd2kfd_init_p(KFD_INTERFACE_VERSION, &kgd2kfd)) {
		/* Interface-version handshake failed: drop the module ref. */
		symbol_put(kgd2kfd_init);
		kfd2kgd = NULL;
		kgd2kfd = NULL;

		return false;
	}

	return true;
#elif defined(CONFIG_HSA_AMD)
	/* amdkfd built in: call its init entry point directly. */
	if (!kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd)) {
		kfd2kgd = NULL;
		kgd2kfd = NULL;
		return false;
	}

	return true;
#else
	/* KFD support compiled out entirely. */
	kfd2kgd = NULL;
	return false;
#endif
}
88
89void amdgpu_amdkfd_fini(void)
90{
91 if (kgd2kfd) {
92 kgd2kfd->exit();
93 symbol_put(kgd2kfd_init);
94 }
95}
96
97void amdgpu_amdkfd_device_probe(struct amdgpu_device *rdev)
98{
99 if (kgd2kfd)
100 rdev->kfd = kgd2kfd->probe((struct kgd_dev *)rdev,
101 rdev->pdev, kfd2kgd);
102}
103
/*
 * amdgpu_amdkfd_device_init - hand per-device shared resources to amdkfd.
 * No-op when the device was not probed by amdkfd (rdev->kfd == NULL).
 */
void amdgpu_amdkfd_device_init(struct amdgpu_device *rdev)
{
	if (rdev->kfd) {
		struct kgd2kfd_shared_resources gpu_resources = {
			/* Bits 8..15 set: VMIDs 8-15 go to KFD; presumably
			 * VMIDs 0-7 stay with amdgpu graphics — confirm. */
			.compute_vmid_bitmap = 0xFF00,

			/* Three compute pipes starting at pipe 1; NOTE(review):
			 * pipe 0 appears reserved for amdgpu itself — confirm. */
			.first_compute_pipe = 1,
			.compute_pipe_count = 4 - 1,
		};

		/* Describe the doorbell aperture KFD may hand to user queues. */
		amdgpu_doorbell_get_kfd_info(rdev,
				&gpu_resources.doorbell_physical_address,
				&gpu_resources.doorbell_aperture_size,
				&gpu_resources.doorbell_start_offset);

		kgd2kfd->device_init(rdev->kfd, &gpu_resources);
	}
}
122
123void amdgpu_amdkfd_device_fini(struct amdgpu_device *rdev)
124{
125 if (rdev->kfd) {
126 kgd2kfd->device_exit(rdev->kfd);
127 rdev->kfd = NULL;
128 }
129}
130
131void amdgpu_amdkfd_interrupt(struct amdgpu_device *rdev,
132 const void *ih_ring_entry)
133{
134 if (rdev->kfd)
135 kgd2kfd->interrupt(rdev->kfd, ih_ring_entry);
136}
137
138void amdgpu_amdkfd_suspend(struct amdgpu_device *rdev)
139{
140 if (rdev->kfd)
141 kgd2kfd->suspend(rdev->kfd);
142}
143
144int amdgpu_amdkfd_resume(struct amdgpu_device *rdev)
145{
146 int r = 0;
147
148 if (rdev->kfd)
149 r = kgd2kfd->resume(rdev->kfd);
150
151 return r;
152}
153
154u32 pool_to_domain(enum kgd_memory_pool p)
155{
156 switch (p) {
157 case KGD_POOL_FRAMEBUFFER: return AMDGPU_GEM_DOMAIN_VRAM;
158 default: return AMDGPU_GEM_DOMAIN_GTT;
159 }
160}
161
162int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
163 void **mem_obj, uint64_t *gpu_addr,
164 void **cpu_ptr)
165{
166 struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;
167 struct kgd_mem **mem = (struct kgd_mem **) mem_obj;
168 int r;
169
170 BUG_ON(kgd == NULL);
171 BUG_ON(gpu_addr == NULL);
172 BUG_ON(cpu_ptr == NULL);
173
174 *mem = kmalloc(sizeof(struct kgd_mem), GFP_KERNEL);
175 if ((*mem) == NULL)
176 return -ENOMEM;
177
178 r = amdgpu_bo_create(rdev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT,
179 AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, &(*mem)->bo);
180 if (r) {
181 dev_err(rdev->dev,
182 "failed to allocate BO for amdkfd (%d)\n", r);
183 return r;
184 }
185
186 /* map the buffer */
187 r = amdgpu_bo_reserve((*mem)->bo, true);
188 if (r) {
189 dev_err(rdev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
190 goto allocate_mem_reserve_bo_failed;
191 }
192
193 r = amdgpu_bo_pin((*mem)->bo, AMDGPU_GEM_DOMAIN_GTT,
194 &(*mem)->gpu_addr);
195 if (r) {
196 dev_err(rdev->dev, "(%d) failed to pin bo for amdkfd\n", r);
197 goto allocate_mem_pin_bo_failed;
198 }
199 *gpu_addr = (*mem)->gpu_addr;
200
201 r = amdgpu_bo_kmap((*mem)->bo, &(*mem)->cpu_ptr);
202 if (r) {
203 dev_err(rdev->dev,
204 "(%d) failed to map bo to kernel for amdkfd\n", r);
205 goto allocate_mem_kmap_bo_failed;
206 }
207 *cpu_ptr = (*mem)->cpu_ptr;
208
209 amdgpu_bo_unreserve((*mem)->bo);
210
211 return 0;
212
213allocate_mem_kmap_bo_failed:
214 amdgpu_bo_unpin((*mem)->bo);
215allocate_mem_pin_bo_failed:
216 amdgpu_bo_unreserve((*mem)->bo);
217allocate_mem_reserve_bo_failed:
218 amdgpu_bo_unref(&(*mem)->bo);
219
220 return r;
221}
222
223void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
224{
225 struct kgd_mem *mem = (struct kgd_mem *) mem_obj;
226
227 BUG_ON(mem == NULL);
228
229 amdgpu_bo_reserve(mem->bo, true);
230 amdgpu_bo_kunmap(mem->bo);
231 amdgpu_bo_unpin(mem->bo);
232 amdgpu_bo_unreserve(mem->bo);
233 amdgpu_bo_unref(&(mem->bo));
234 kfree(mem);
235}
236
237uint64_t get_vmem_size(struct kgd_dev *kgd)
238{
239 struct amdgpu_device *rdev =
240 (struct amdgpu_device *)kgd;
241
242 BUG_ON(kgd == NULL);
243
244 return rdev->mc.real_vram_size;
245}
246
247uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
248{
249 struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;
250
251 if (rdev->asic_funcs->get_gpu_clock_counter)
252 return rdev->asic_funcs->get_gpu_clock_counter(rdev);
253 return 0;
254}
255
256uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
257{
258 struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;
259
260 /* The sclk is in quantas of 10kHz */
261 return rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk / 100;
262}