drm/amdgpu: merge amdgpu_family.h into amd_shared.h (v2)
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu_amdkfd.h"
#include "amd_shared.h"
#include <drm/drmP.h>
#include "amdgpu.h"
#include <linux/module.h>

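/*
 * Interface tables exchanged between amdgpu (KGD) and amdkfd (KFD):
 * kfd2kgd is provided by this driver and handed to amdkfd, while kgd2kfd is
 * obtained from amdkfd (via symbol_request() when amdkfd is built as a module).
 */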
const struct kfd2kgd_calls *kfd2kgd;
const struct kgd2kfd_calls *kgd2kfd;
bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);

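/*
 * Early init: when amdkfd is built as a module, verify that its entry point
 * can be resolved before any per-device setup is attempted.
 */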
bool amdgpu_amdkfd_init(void)
{
#if defined(CONFIG_HSA_AMD_MODULE)
	bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);

	kgd2kfd_init_p = symbol_request(kgd2kfd_init);

	if (kgd2kfd_init_p == NULL)
		return false;
#endif
	return true;
}

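/*
 * Select the kfd2kgd function table that matches the ASIC and ask amdkfd for
 * its kgd2kfd table. Returns false if the ASIC is not supported or amdkfd is
 * unavailable.
 */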
bool amdgpu_amdkfd_load_interface(struct amdgpu_device *rdev)
{
#if defined(CONFIG_HSA_AMD_MODULE)
	bool (*kgd2kfd_init_p)(unsigned, const struct kgd2kfd_calls**);
#endif

	switch (rdev->asic_type) {
	case CHIP_KAVERI:
		kfd2kgd = amdgpu_amdkfd_gfx_7_get_functions();
		break;
	case CHIP_CARRIZO:
		kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions();
		break;
	default:
		return false;
	}

#if defined(CONFIG_HSA_AMD_MODULE)
	kgd2kfd_init_p = symbol_request(kgd2kfd_init);

	if (kgd2kfd_init_p == NULL) {
		kfd2kgd = NULL;
		return false;
	}

	if (!kgd2kfd_init_p(KFD_INTERFACE_VERSION, &kgd2kfd)) {
		symbol_put(kgd2kfd_init);
		kfd2kgd = NULL;
		kgd2kfd = NULL;

		return false;
	}

	return true;
#elif defined(CONFIG_HSA_AMD)
	if (!kgd2kfd_init(KFD_INTERFACE_VERSION, &kgd2kfd)) {
		kfd2kgd = NULL;
		kgd2kfd = NULL;
		return false;
	}

	return true;
#else
	kfd2kgd = NULL;
	return false;
#endif
}

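/* Tear down the amdkfd interface and drop the symbol reference taken above. */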
void amdgpu_amdkfd_fini(void)
{
	if (kgd2kfd) {
		kgd2kfd->exit();
		symbol_put(kgd2kfd_init);
	}
}

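/* Register this GPU with amdkfd and record the device handle it returns. */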
void amdgpu_amdkfd_device_probe(struct amdgpu_device *rdev)
{
	if (kgd2kfd)
		rdev->kfd = kgd2kfd->probe((struct kgd_dev *)rdev,
					   rdev->pdev, kfd2kgd);
}

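/*
 * Hand amdkfd its share of the hardware resources: VMIDs 8-15
 * (compute_vmid_bitmap 0xFF00), compute pipes 1-3 (pipe 0 is left to the
 * graphics side) and the doorbell aperture reserved for the KFD.
 */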
void amdgpu_amdkfd_device_init(struct amdgpu_device *rdev)
{
	if (rdev->kfd) {
		struct kgd2kfd_shared_resources gpu_resources = {
			.compute_vmid_bitmap = 0xFF00,

			.first_compute_pipe = 1,
			.compute_pipe_count = 4 - 1,
		};

		amdgpu_doorbell_get_kfd_info(rdev,
				&gpu_resources.doorbell_physical_address,
				&gpu_resources.doorbell_aperture_size,
				&gpu_resources.doorbell_start_offset);

		kgd2kfd->device_init(rdev->kfd, &gpu_resources);
	}
}

void amdgpu_amdkfd_device_fini(struct amdgpu_device *rdev)
{
	if (rdev->kfd) {
		kgd2kfd->device_exit(rdev->kfd);
		rdev->kfd = NULL;
	}
}

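/* Thin wrappers that forward interrupts and suspend/resume events to amdkfd. */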
void amdgpu_amdkfd_interrupt(struct amdgpu_device *rdev,
			     const void *ih_ring_entry)
{
	if (rdev->kfd)
		kgd2kfd->interrupt(rdev->kfd, ih_ring_entry);
}

void amdgpu_amdkfd_suspend(struct amdgpu_device *rdev)
{
	if (rdev->kfd)
		kgd2kfd->suspend(rdev->kfd);
}

int amdgpu_amdkfd_resume(struct amdgpu_device *rdev)
{
	int r = 0;

	if (rdev->kfd)
		r = kgd2kfd->resume(rdev->kfd);

	return r;
}

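/* Map a KFD memory pool type to the amdgpu GEM domain that backs it. */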
u32 pool_to_domain(enum kgd_memory_pool p)
{
	switch (p) {
	case KGD_POOL_FRAMEBUFFER: return AMDGPU_GEM_DOMAIN_VRAM;
	default: return AMDGPU_GEM_DOMAIN_GTT;
	}
}

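/*
 * Allocate a pinned, CPU-mapped GTT buffer object on behalf of amdkfd and
 * return both its GPU address and its kernel CPU pointer.
 */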
int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
			void **mem_obj, uint64_t *gpu_addr,
			void **cpu_ptr)
{
	struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;
	struct kgd_mem **mem = (struct kgd_mem **) mem_obj;
	int r;

	BUG_ON(kgd == NULL);
	BUG_ON(gpu_addr == NULL);
	BUG_ON(cpu_ptr == NULL);

	*mem = kmalloc(sizeof(struct kgd_mem), GFP_KERNEL);
	if ((*mem) == NULL)
		return -ENOMEM;

	r = amdgpu_bo_create(rdev, size, PAGE_SIZE, true, AMDGPU_GEM_DOMAIN_GTT,
			AMDGPU_GEM_CREATE_CPU_GTT_USWC, NULL, &(*mem)->bo);
	if (r) {
		dev_err(rdev->dev,
			"failed to allocate BO for amdkfd (%d)\n", r);
		kfree(*mem);
		return r;
	}

	/* map the buffer */
	r = amdgpu_bo_reserve((*mem)->bo, true);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to reserve bo for amdkfd\n", r);
		goto allocate_mem_reserve_bo_failed;
	}

	r = amdgpu_bo_pin((*mem)->bo, AMDGPU_GEM_DOMAIN_GTT,
				&(*mem)->gpu_addr);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to pin bo for amdkfd\n", r);
		goto allocate_mem_pin_bo_failed;
	}
	*gpu_addr = (*mem)->gpu_addr;

	r = amdgpu_bo_kmap((*mem)->bo, &(*mem)->cpu_ptr);
	if (r) {
		dev_err(rdev->dev,
			"(%d) failed to map bo to kernel for amdkfd\n", r);
		goto allocate_mem_kmap_bo_failed;
	}
	*cpu_ptr = (*mem)->cpu_ptr;

	amdgpu_bo_unreserve((*mem)->bo);

	return 0;

allocate_mem_kmap_bo_failed:
	amdgpu_bo_unpin((*mem)->bo);
allocate_mem_pin_bo_failed:
	amdgpu_bo_unreserve((*mem)->bo);
allocate_mem_reserve_bo_failed:
	amdgpu_bo_unref(&(*mem)->bo);
	/* don't leak the kgd_mem wrapper on error */
	kfree(*mem);

	return r;
}

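/* Undo alloc_gtt_mem(): unmap, unpin and release the buffer object. */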
void free_gtt_mem(struct kgd_dev *kgd, void *mem_obj)
{
	struct kgd_mem *mem = (struct kgd_mem *) mem_obj;

	BUG_ON(mem == NULL);

	amdgpu_bo_reserve(mem->bo, true);
	amdgpu_bo_kunmap(mem->bo);
	amdgpu_bo_unpin(mem->bo);
	amdgpu_bo_unreserve(mem->bo);
	amdgpu_bo_unref(&(mem->bo));
	kfree(mem);
}

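/* Report the amount of usable VRAM to amdkfd. */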
uint64_t get_vmem_size(struct kgd_dev *kgd)
{
	struct amdgpu_device *rdev =
		(struct amdgpu_device *)kgd;

	BUG_ON(kgd == NULL);

	return rdev->mc.real_vram_size;
}

uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
{
	struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;

	if (rdev->asic_funcs->get_gpu_clock_counter)
		return rdev->asic_funcs->get_gpu_clock_counter(rdev);
	return 0;
}

uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
{
	struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;

	/* sclk is reported in units of 10 kHz, so divide by 100 to get MHz */
	return rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk / 100;
}