Commit | Line | Data |
---|---|---|
d03846af CZ |
1 | /* |
2 | * Copyright 2015 Advanced Micro Devices, Inc. | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice shall be included in | |
12 | * all copies or substantial portions of the Software. | |
13 | * | |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | |
20 | * OTHER DEALINGS IN THE SOFTWARE. | |
21 | * | |
22 | * | |
23 | */ | |
57ff96cf CZ |
24 | #include <linux/list.h> |
25 | #include <linux/slab.h> | |
97cb7f6e | 26 | #include <linux/pci.h> |
57ff96cf | 27 | #include <drm/drmP.h> |
bf3911b0 | 28 | #include <linux/firmware.h> |
57ff96cf | 29 | #include <drm/amdgpu_drm.h> |
d03846af CZ |
30 | #include "amdgpu.h" |
31 | #include "cgs_linux.h" | |
25da4427 | 32 | #include "atom.h" |
bf3911b0 JZ |
33 | #include "amdgpu_ucode.h" |
34 | ||
d03846af CZ |
35 | |
/*
 * Binds the common CGS (Common Graphics Services) device interface to a
 * specific amdgpu device instance.
 */
struct amdgpu_cgs_device {
	struct cgs_device base;		/* common CGS interface (ops tables) */
	struct amdgpu_device *adev;	/* backing amdgpu device */
};

/*
 * Recover the amdgpu_device from the opaque cgs_device pointer passed to
 * every CGS callback.  Assumes the pointer is the amdgpu_cgs_device
 * allocated by amdgpu_cgs_create_device() below.
 */
#define CGS_FUNC_ADEV					\
	struct amdgpu_device *adev =			\
		((struct amdgpu_cgs_device *)cgs_device)->adev
44 | ||
45 | static int amdgpu_cgs_gpu_mem_info(void *cgs_device, enum cgs_gpu_mem_type type, | |
46 | uint64_t *mc_start, uint64_t *mc_size, | |
47 | uint64_t *mem_size) | |
48 | { | |
57ff96cf CZ |
49 | CGS_FUNC_ADEV; |
50 | switch(type) { | |
51 | case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB: | |
52 | case CGS_GPU_MEM_TYPE__VISIBLE_FB: | |
53 | *mc_start = 0; | |
54 | *mc_size = adev->mc.visible_vram_size; | |
55 | *mem_size = adev->mc.visible_vram_size - adev->vram_pin_size; | |
56 | break; | |
57 | case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB: | |
58 | case CGS_GPU_MEM_TYPE__INVISIBLE_FB: | |
59 | *mc_start = adev->mc.visible_vram_size; | |
60 | *mc_size = adev->mc.real_vram_size - adev->mc.visible_vram_size; | |
61 | *mem_size = *mc_size; | |
62 | break; | |
63 | case CGS_GPU_MEM_TYPE__GART_CACHEABLE: | |
64 | case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE: | |
65 | *mc_start = adev->mc.gtt_start; | |
66 | *mc_size = adev->mc.gtt_size; | |
67 | *mem_size = adev->mc.gtt_size - adev->gart_pin_size; | |
68 | break; | |
69 | default: | |
70 | return -EINVAL; | |
71 | } | |
72 | ||
d03846af CZ |
73 | return 0; |
74 | } | |
75 | ||
76 | static int amdgpu_cgs_gmap_kmem(void *cgs_device, void *kmem, | |
77 | uint64_t size, | |
78 | uint64_t min_offset, uint64_t max_offset, | |
79 | cgs_handle_t *kmem_handle, uint64_t *mcaddr) | |
80 | { | |
57ff96cf CZ |
81 | CGS_FUNC_ADEV; |
82 | int ret; | |
83 | struct amdgpu_bo *bo; | |
84 | struct page *kmem_page = vmalloc_to_page(kmem); | |
85 | int npages = ALIGN(size, PAGE_SIZE) >> PAGE_SHIFT; | |
86 | ||
87 | struct sg_table *sg = drm_prime_pages_to_sg(&kmem_page, npages); | |
88 | ret = amdgpu_bo_create(adev, size, PAGE_SIZE, false, | |
89 | AMDGPU_GEM_DOMAIN_GTT, 0, sg, &bo); | |
90 | if (ret) | |
91 | return ret; | |
92 | ret = amdgpu_bo_reserve(bo, false); | |
93 | if (unlikely(ret != 0)) | |
94 | return ret; | |
95 | ||
96 | /* pin buffer into GTT */ | |
97 | ret = amdgpu_bo_pin_restricted(bo, AMDGPU_GEM_DOMAIN_GTT, | |
98 | min_offset, max_offset, mcaddr); | |
99 | amdgpu_bo_unreserve(bo); | |
100 | ||
101 | *kmem_handle = (cgs_handle_t)bo; | |
102 | return ret; | |
d03846af CZ |
103 | } |
104 | ||
105 | static int amdgpu_cgs_gunmap_kmem(void *cgs_device, cgs_handle_t kmem_handle) | |
106 | { | |
57ff96cf CZ |
107 | struct amdgpu_bo *obj = (struct amdgpu_bo *)kmem_handle; |
108 | ||
109 | if (obj) { | |
110 | int r = amdgpu_bo_reserve(obj, false); | |
111 | if (likely(r == 0)) { | |
112 | amdgpu_bo_unpin(obj); | |
113 | amdgpu_bo_unreserve(obj); | |
114 | } | |
115 | amdgpu_bo_unref(&obj); | |
116 | ||
117 | } | |
d03846af CZ |
118 | return 0; |
119 | } | |
120 | ||
121 | static int amdgpu_cgs_alloc_gpu_mem(void *cgs_device, | |
122 | enum cgs_gpu_mem_type type, | |
123 | uint64_t size, uint64_t align, | |
124 | uint64_t min_offset, uint64_t max_offset, | |
125 | cgs_handle_t *handle) | |
126 | { | |
57ff96cf CZ |
127 | CGS_FUNC_ADEV; |
128 | uint16_t flags = 0; | |
129 | int ret = 0; | |
130 | uint32_t domain = 0; | |
131 | struct amdgpu_bo *obj; | |
132 | struct ttm_placement placement; | |
133 | struct ttm_place place; | |
134 | ||
135 | if (min_offset > max_offset) { | |
136 | BUG_ON(1); | |
137 | return -EINVAL; | |
138 | } | |
139 | ||
140 | /* fail if the alignment is not a power of 2 */ | |
141 | if (((align != 1) && (align & (align - 1))) | |
142 | || size == 0 || align == 0) | |
143 | return -EINVAL; | |
144 | ||
145 | ||
146 | switch(type) { | |
147 | case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB: | |
148 | case CGS_GPU_MEM_TYPE__VISIBLE_FB: | |
149 | flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; | |
150 | domain = AMDGPU_GEM_DOMAIN_VRAM; | |
151 | if (max_offset > adev->mc.real_vram_size) | |
152 | return -EINVAL; | |
153 | place.fpfn = min_offset >> PAGE_SHIFT; | |
154 | place.lpfn = max_offset >> PAGE_SHIFT; | |
155 | place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | | |
156 | TTM_PL_FLAG_VRAM; | |
157 | break; | |
158 | case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB: | |
159 | case CGS_GPU_MEM_TYPE__INVISIBLE_FB: | |
160 | flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS; | |
161 | domain = AMDGPU_GEM_DOMAIN_VRAM; | |
162 | if (adev->mc.visible_vram_size < adev->mc.real_vram_size) { | |
163 | place.fpfn = | |
164 | max(min_offset, adev->mc.visible_vram_size) >> PAGE_SHIFT; | |
165 | place.lpfn = | |
166 | min(max_offset, adev->mc.real_vram_size) >> PAGE_SHIFT; | |
167 | place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | | |
168 | TTM_PL_FLAG_VRAM; | |
169 | } | |
170 | ||
171 | break; | |
172 | case CGS_GPU_MEM_TYPE__GART_CACHEABLE: | |
173 | domain = AMDGPU_GEM_DOMAIN_GTT; | |
174 | place.fpfn = min_offset >> PAGE_SHIFT; | |
175 | place.lpfn = max_offset >> PAGE_SHIFT; | |
176 | place.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT; | |
177 | break; | |
178 | case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE: | |
179 | flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC; | |
180 | domain = AMDGPU_GEM_DOMAIN_GTT; | |
181 | place.fpfn = min_offset >> PAGE_SHIFT; | |
182 | place.lpfn = max_offset >> PAGE_SHIFT; | |
183 | place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT | | |
184 | TTM_PL_FLAG_UNCACHED; | |
185 | break; | |
186 | default: | |
187 | return -EINVAL; | |
188 | } | |
189 | ||
190 | ||
191 | *handle = 0; | |
192 | ||
193 | placement.placement = &place; | |
194 | placement.num_placement = 1; | |
195 | placement.busy_placement = &place; | |
196 | placement.num_busy_placement = 1; | |
197 | ||
198 | ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE, | |
199 | true, domain, flags, | |
200 | NULL, &placement, &obj); | |
201 | if (ret) { | |
202 | DRM_ERROR("(%d) bo create failed\n", ret); | |
203 | return ret; | |
204 | } | |
205 | *handle = (cgs_handle_t)obj; | |
206 | ||
207 | return ret; | |
d03846af CZ |
208 | } |
209 | ||
210 | static int amdgpu_cgs_import_gpu_mem(void *cgs_device, int dmabuf_fd, | |
211 | cgs_handle_t *handle) | |
212 | { | |
57ff96cf CZ |
213 | CGS_FUNC_ADEV; |
214 | int r; | |
215 | uint32_t dma_handle; | |
216 | struct drm_gem_object *obj; | |
217 | struct amdgpu_bo *bo; | |
218 | struct drm_device *dev = adev->ddev; | |
219 | struct drm_file *file_priv = NULL, *priv; | |
220 | ||
221 | mutex_lock(&dev->struct_mutex); | |
222 | list_for_each_entry(priv, &dev->filelist, lhead) { | |
223 | rcu_read_lock(); | |
224 | if (priv->pid == get_pid(task_pid(current))) | |
225 | file_priv = priv; | |
226 | rcu_read_unlock(); | |
227 | if (file_priv) | |
228 | break; | |
229 | } | |
230 | mutex_unlock(&dev->struct_mutex); | |
231 | r = dev->driver->prime_fd_to_handle(dev, | |
232 | file_priv, dmabuf_fd, | |
233 | &dma_handle); | |
234 | spin_lock(&file_priv->table_lock); | |
235 | ||
236 | /* Check if we currently have a reference on the object */ | |
237 | obj = idr_find(&file_priv->object_idr, dma_handle); | |
238 | if (obj == NULL) { | |
239 | spin_unlock(&file_priv->table_lock); | |
240 | return -EINVAL; | |
241 | } | |
242 | spin_unlock(&file_priv->table_lock); | |
243 | bo = gem_to_amdgpu_bo(obj); | |
244 | *handle = (cgs_handle_t)bo; | |
d03846af CZ |
245 | return 0; |
246 | } | |
247 | ||
248 | static int amdgpu_cgs_free_gpu_mem(void *cgs_device, cgs_handle_t handle) | |
249 | { | |
57ff96cf CZ |
250 | struct amdgpu_bo *obj = (struct amdgpu_bo *)handle; |
251 | ||
252 | if (obj) { | |
253 | int r = amdgpu_bo_reserve(obj, false); | |
254 | if (likely(r == 0)) { | |
255 | amdgpu_bo_kunmap(obj); | |
256 | amdgpu_bo_unpin(obj); | |
257 | amdgpu_bo_unreserve(obj); | |
258 | } | |
259 | amdgpu_bo_unref(&obj); | |
260 | ||
261 | } | |
d03846af CZ |
262 | return 0; |
263 | } | |
264 | ||
265 | static int amdgpu_cgs_gmap_gpu_mem(void *cgs_device, cgs_handle_t handle, | |
266 | uint64_t *mcaddr) | |
267 | { | |
57ff96cf CZ |
268 | int r; |
269 | u64 min_offset, max_offset; | |
270 | struct amdgpu_bo *obj = (struct amdgpu_bo *)handle; | |
271 | ||
272 | WARN_ON_ONCE(obj->placement.num_placement > 1); | |
273 | ||
274 | min_offset = obj->placements[0].fpfn << PAGE_SHIFT; | |
275 | max_offset = obj->placements[0].lpfn << PAGE_SHIFT; | |
276 | ||
277 | r = amdgpu_bo_reserve(obj, false); | |
278 | if (unlikely(r != 0)) | |
279 | return r; | |
280 | r = amdgpu_bo_pin_restricted(obj, AMDGPU_GEM_DOMAIN_GTT, | |
281 | min_offset, max_offset, mcaddr); | |
282 | amdgpu_bo_unreserve(obj); | |
283 | return r; | |
d03846af CZ |
284 | } |
285 | ||
286 | static int amdgpu_cgs_gunmap_gpu_mem(void *cgs_device, cgs_handle_t handle) | |
287 | { | |
57ff96cf CZ |
288 | int r; |
289 | struct amdgpu_bo *obj = (struct amdgpu_bo *)handle; | |
290 | r = amdgpu_bo_reserve(obj, false); | |
291 | if (unlikely(r != 0)) | |
292 | return r; | |
293 | r = amdgpu_bo_unpin(obj); | |
294 | amdgpu_bo_unreserve(obj); | |
295 | return r; | |
d03846af CZ |
296 | } |
297 | ||
298 | static int amdgpu_cgs_kmap_gpu_mem(void *cgs_device, cgs_handle_t handle, | |
299 | void **map) | |
300 | { | |
57ff96cf CZ |
301 | int r; |
302 | struct amdgpu_bo *obj = (struct amdgpu_bo *)handle; | |
303 | r = amdgpu_bo_reserve(obj, false); | |
304 | if (unlikely(r != 0)) | |
305 | return r; | |
306 | r = amdgpu_bo_kmap(obj, map); | |
307 | amdgpu_bo_unreserve(obj); | |
308 | return r; | |
d03846af CZ |
309 | } |
310 | ||
311 | static int amdgpu_cgs_kunmap_gpu_mem(void *cgs_device, cgs_handle_t handle) | |
312 | { | |
57ff96cf CZ |
313 | int r; |
314 | struct amdgpu_bo *obj = (struct amdgpu_bo *)handle; | |
315 | r = amdgpu_bo_reserve(obj, false); | |
316 | if (unlikely(r != 0)) | |
317 | return r; | |
318 | amdgpu_bo_kunmap(obj); | |
319 | amdgpu_bo_unreserve(obj); | |
320 | return r; | |
d03846af CZ |
321 | } |
322 | ||
/* Read a 32-bit MMIO register through the adev accessors. */
static uint32_t amdgpu_cgs_read_register(void *cgs_device, unsigned offset)
{
	CGS_FUNC_ADEV;
	return RREG32(offset);
}
328 | ||
/* Write a 32-bit MMIO register through the adev accessors. */
static void amdgpu_cgs_write_register(void *cgs_device, unsigned offset,
				      uint32_t value)
{
	CGS_FUNC_ADEV;
	WREG32(offset, value);
}
335 | ||
/*
 * Read from one of the indirect register spaces (indexed MMIO, PCIe, SMC,
 * UVD context, DIDT).  Audio-endpoint space is not wired up and reads
 * back as 0 after an error message; an unknown space triggers a WARN and
 * also returns 0.
 */
static uint32_t amdgpu_cgs_read_ind_register(void *cgs_device,
					     enum cgs_ind_reg space,
					     unsigned index)
{
	CGS_FUNC_ADEV;
	switch (space) {
	case CGS_IND_REG__MMIO:
		return RREG32_IDX(index);
	case CGS_IND_REG__PCIE:
		return RREG32_PCIE(index);
	case CGS_IND_REG__SMC:
		return RREG32_SMC(index);
	case CGS_IND_REG__UVD_CTX:
		return RREG32_UVD_CTX(index);
	case CGS_IND_REG__DIDT:
		return RREG32_DIDT(index);
	case CGS_IND_REG__AUDIO_ENDPT:
		DRM_ERROR("audio endpt register access not implemented.\n");
		return 0;
	}
	/* unknown enumerator: complain loudly, return a benign value */
	WARN(1, "Invalid indirect register space");
	return 0;
}
359 | ||
/*
 * Write to one of the indirect register spaces (indexed MMIO, PCIe, SMC,
 * UVD context, DIDT).  Audio-endpoint space is not implemented; an
 * unknown space triggers a WARN.
 */
static void amdgpu_cgs_write_ind_register(void *cgs_device,
					  enum cgs_ind_reg space,
					  unsigned index, uint32_t value)
{
	CGS_FUNC_ADEV;
	switch (space) {
	/* `return WREG32_*()` is valid: the macros expand to void expressions */
	case CGS_IND_REG__MMIO:
		return WREG32_IDX(index, value);
	case CGS_IND_REG__PCIE:
		return WREG32_PCIE(index, value);
	case CGS_IND_REG__SMC:
		return WREG32_SMC(index, value);
	case CGS_IND_REG__UVD_CTX:
		return WREG32_UVD_CTX(index, value);
	case CGS_IND_REG__DIDT:
		return WREG32_DIDT(index, value);
	case CGS_IND_REG__AUDIO_ENDPT:
		DRM_ERROR("audio endpt register access not implemented.\n");
		return;
	}
	WARN(1, "Invalid indirect register space");
}
382 | ||
383 | static uint8_t amdgpu_cgs_read_pci_config_byte(void *cgs_device, unsigned addr) | |
384 | { | |
97cb7f6e CZ |
385 | CGS_FUNC_ADEV; |
386 | uint8_t val; | |
387 | int ret = pci_read_config_byte(adev->pdev, addr, &val); | |
388 | if (WARN(ret, "pci_read_config_byte error")) | |
389 | return 0; | |
390 | return val; | |
d03846af CZ |
391 | } |
392 | ||
393 | static uint16_t amdgpu_cgs_read_pci_config_word(void *cgs_device, unsigned addr) | |
394 | { | |
97cb7f6e CZ |
395 | CGS_FUNC_ADEV; |
396 | uint16_t val; | |
397 | int ret = pci_read_config_word(adev->pdev, addr, &val); | |
398 | if (WARN(ret, "pci_read_config_word error")) | |
399 | return 0; | |
400 | return val; | |
d03846af CZ |
401 | } |
402 | ||
403 | static uint32_t amdgpu_cgs_read_pci_config_dword(void *cgs_device, | |
404 | unsigned addr) | |
405 | { | |
97cb7f6e CZ |
406 | CGS_FUNC_ADEV; |
407 | uint32_t val; | |
408 | int ret = pci_read_config_dword(adev->pdev, addr, &val); | |
409 | if (WARN(ret, "pci_read_config_dword error")) | |
410 | return 0; | |
411 | return val; | |
d03846af CZ |
412 | } |
413 | ||
414 | static void amdgpu_cgs_write_pci_config_byte(void *cgs_device, unsigned addr, | |
415 | uint8_t value) | |
416 | { | |
97cb7f6e CZ |
417 | CGS_FUNC_ADEV; |
418 | int ret = pci_write_config_byte(adev->pdev, addr, value); | |
419 | WARN(ret, "pci_write_config_byte error"); | |
d03846af CZ |
420 | } |
421 | ||
422 | static void amdgpu_cgs_write_pci_config_word(void *cgs_device, unsigned addr, | |
423 | uint16_t value) | |
424 | { | |
97cb7f6e CZ |
425 | CGS_FUNC_ADEV; |
426 | int ret = pci_write_config_word(adev->pdev, addr, value); | |
427 | WARN(ret, "pci_write_config_word error"); | |
d03846af CZ |
428 | } |
429 | ||
430 | static void amdgpu_cgs_write_pci_config_dword(void *cgs_device, unsigned addr, | |
431 | uint32_t value) | |
432 | { | |
97cb7f6e CZ |
433 | CGS_FUNC_ADEV; |
434 | int ret = pci_write_config_dword(adev->pdev, addr, value); | |
435 | WARN(ret, "pci_write_config_dword error"); | |
d03846af CZ |
436 | } |
437 | ||
438 | static const void *amdgpu_cgs_atom_get_data_table(void *cgs_device, | |
439 | unsigned table, uint16_t *size, | |
440 | uint8_t *frev, uint8_t *crev) | |
441 | { | |
25da4427 CZ |
442 | CGS_FUNC_ADEV; |
443 | uint16_t data_start; | |
444 | ||
445 | if (amdgpu_atom_parse_data_header( | |
446 | adev->mode_info.atom_context, table, size, | |
447 | frev, crev, &data_start)) | |
448 | return (uint8_t*)adev->mode_info.atom_context->bios + | |
449 | data_start; | |
450 | ||
d03846af CZ |
451 | return NULL; |
452 | } | |
453 | ||
454 | static int amdgpu_cgs_atom_get_cmd_table_revs(void *cgs_device, unsigned table, | |
455 | uint8_t *frev, uint8_t *crev) | |
456 | { | |
25da4427 CZ |
457 | CGS_FUNC_ADEV; |
458 | ||
459 | if (amdgpu_atom_parse_cmd_header( | |
460 | adev->mode_info.atom_context, table, | |
461 | frev, crev)) | |
462 | return 0; | |
463 | ||
464 | return -EINVAL; | |
d03846af CZ |
465 | } |
466 | ||
467 | static int amdgpu_cgs_atom_exec_cmd_table(void *cgs_device, unsigned table, | |
468 | void *args) | |
469 | { | |
25da4427 | 470 | CGS_FUNC_ADEV; |
d03846af | 471 | |
25da4427 CZ |
472 | return amdgpu_atom_execute_table( |
473 | adev->mode_info.atom_context, table, args); | |
474 | } | |
d03846af CZ |
475 | |
/*
 * The six amdgpu_cgs_*pm* callbacks below are placeholders: the CGS power
 * management request interface is not implemented yet, so each reports
 * success without doing anything.
 */
static int amdgpu_cgs_create_pm_request(void *cgs_device, cgs_handle_t *request)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_destroy_pm_request(void *cgs_device, cgs_handle_t request)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_set_pm_request(void *cgs_device, cgs_handle_t request,
				     int active)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_pm_request_clock(void *cgs_device, cgs_handle_t request,
				       enum cgs_clock clock, unsigned freq)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_pm_request_engine(void *cgs_device, cgs_handle_t request,
					enum cgs_engine engine, int powered)
{
	/* TODO */
	return 0;
}



static int amdgpu_cgs_pm_query_clock_limits(void *cgs_device,
					    enum cgs_clock clock,
					    struct cgs_clock_limits *limits)
{
	/* TODO */
	return 0;
}

/* Deliberately unsupported (unlike the TODO stubs above): fails loudly. */
static int amdgpu_cgs_set_camera_voltages(void *cgs_device, uint32_t mask,
					  const uint32_t *voltages)
{
	DRM_ERROR("not implemented");
	return -EPERM;
}
525 | ||
0cf3be21 AD |
/*
 * Per-source bookkeeping for IRQ sources registered through CGS: the
 * client callbacks plus the src_id and opaque cookie they are invoked
 * with from cgs_set_irq_state()/cgs_process_irq().
 */
struct cgs_irq_params {
	unsigned src_id;		/* interrupt source id this entry serves */
	cgs_irq_source_set_func_t set;	/* enable/disable callback */
	cgs_irq_handler_func_t handler;	/* interrupt handler callback */
	void *private_data;		/* cookie passed back to the client */
};
532 | ||
533 | static int cgs_set_irq_state(struct amdgpu_device *adev, | |
534 | struct amdgpu_irq_src *src, | |
535 | unsigned type, | |
536 | enum amdgpu_interrupt_state state) | |
537 | { | |
538 | struct cgs_irq_params *irq_params = | |
539 | (struct cgs_irq_params *)src->data; | |
540 | if (!irq_params) | |
541 | return -EINVAL; | |
542 | if (!irq_params->set) | |
543 | return -EINVAL; | |
544 | return irq_params->set(irq_params->private_data, | |
545 | irq_params->src_id, | |
546 | type, | |
547 | (int)state); | |
548 | } | |
549 | ||
550 | static int cgs_process_irq(struct amdgpu_device *adev, | |
551 | struct amdgpu_irq_src *source, | |
552 | struct amdgpu_iv_entry *entry) | |
553 | { | |
554 | struct cgs_irq_params *irq_params = | |
555 | (struct cgs_irq_params *)source->data; | |
556 | if (!irq_params) | |
557 | return -EINVAL; | |
558 | if (!irq_params->handler) | |
559 | return -EINVAL; | |
560 | return irq_params->handler(irq_params->private_data, | |
561 | irq_params->src_id, | |
562 | entry->iv_entry); | |
563 | } | |
564 | ||
/* Glue routing amdgpu IRQ state changes and interrupts to the CGS client
 * callbacks stored in cgs_irq_params. */
static const struct amdgpu_irq_src_funcs cgs_irq_funcs = {
	.set = cgs_set_irq_state,
	.process = cgs_process_irq,
};
569 | ||
d03846af CZ |
570 | static int amdgpu_cgs_add_irq_source(void *cgs_device, unsigned src_id, |
571 | unsigned num_types, | |
572 | cgs_irq_source_set_func_t set, | |
573 | cgs_irq_handler_func_t handler, | |
574 | void *private_data) | |
575 | { | |
0cf3be21 AD |
576 | CGS_FUNC_ADEV; |
577 | int ret = 0; | |
578 | struct cgs_irq_params *irq_params; | |
579 | struct amdgpu_irq_src *source = | |
580 | kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL); | |
581 | if (!source) | |
582 | return -ENOMEM; | |
583 | irq_params = | |
584 | kzalloc(sizeof(struct cgs_irq_params), GFP_KERNEL); | |
585 | if (!irq_params) { | |
586 | kfree(source); | |
587 | return -ENOMEM; | |
588 | } | |
589 | source->num_types = num_types; | |
590 | source->funcs = &cgs_irq_funcs; | |
591 | irq_params->src_id = src_id; | |
592 | irq_params->set = set; | |
593 | irq_params->handler = handler; | |
594 | irq_params->private_data = private_data; | |
595 | source->data = (void *)irq_params; | |
596 | ret = amdgpu_irq_add_id(adev, src_id, source); | |
597 | if (ret) { | |
598 | kfree(irq_params); | |
599 | kfree(source); | |
600 | } | |
601 | ||
602 | return ret; | |
d03846af CZ |
603 | } |
604 | ||
605 | static int amdgpu_cgs_irq_get(void *cgs_device, unsigned src_id, unsigned type) | |
606 | { | |
0cf3be21 AD |
607 | CGS_FUNC_ADEV; |
608 | return amdgpu_irq_get(adev, adev->irq.sources[src_id], type); | |
d03846af CZ |
609 | } |
610 | ||
611 | static int amdgpu_cgs_irq_put(void *cgs_device, unsigned src_id, unsigned type) | |
612 | { | |
0cf3be21 AD |
613 | CGS_FUNC_ADEV; |
614 | return amdgpu_irq_put(adev, adev->irq.sources[src_id], type); | |
d03846af CZ |
615 | } |
616 | ||
bf3911b0 JZ |
617 | static uint32_t fw_type_convert(void *cgs_device, uint32_t fw_type) |
618 | { | |
619 | CGS_FUNC_ADEV; | |
620 | enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM; | |
621 | ||
622 | switch (fw_type) { | |
623 | case CGS_UCODE_ID_SDMA0: | |
624 | result = AMDGPU_UCODE_ID_SDMA0; | |
625 | break; | |
626 | case CGS_UCODE_ID_SDMA1: | |
627 | result = AMDGPU_UCODE_ID_SDMA1; | |
628 | break; | |
629 | case CGS_UCODE_ID_CP_CE: | |
630 | result = AMDGPU_UCODE_ID_CP_CE; | |
631 | break; | |
632 | case CGS_UCODE_ID_CP_PFP: | |
633 | result = AMDGPU_UCODE_ID_CP_PFP; | |
634 | break; | |
635 | case CGS_UCODE_ID_CP_ME: | |
636 | result = AMDGPU_UCODE_ID_CP_ME; | |
637 | break; | |
638 | case CGS_UCODE_ID_CP_MEC: | |
639 | case CGS_UCODE_ID_CP_MEC_JT1: | |
640 | result = AMDGPU_UCODE_ID_CP_MEC1; | |
641 | break; | |
642 | case CGS_UCODE_ID_CP_MEC_JT2: | |
643 | if (adev->asic_type == CHIP_TONGA) | |
644 | result = AMDGPU_UCODE_ID_CP_MEC2; | |
645 | else if (adev->asic_type == CHIP_CARRIZO) | |
646 | result = AMDGPU_UCODE_ID_CP_MEC1; | |
647 | break; | |
648 | case CGS_UCODE_ID_RLC_G: | |
649 | result = AMDGPU_UCODE_ID_RLC_G; | |
650 | break; | |
651 | default: | |
652 | DRM_ERROR("Firmware type not supported\n"); | |
653 | } | |
654 | return result; | |
655 | } | |
656 | ||
/*
 * Report firmware details for the given ucode type.
 * For non-SMU types the already-loaded amdgpu ucode entry is described
 * (GPU address, size, versions).  For the SMU type the blob is fetched
 * here via request_firmware() and a kernel pointer to the image is
 * returned in info->kptr instead of a GPU address.
 * Returns 0 or a negative errno.
 */
static int amdgpu_cgs_get_firmware_info(void *cgs_device,
					enum cgs_ucode_id type,
					struct cgs_firmware_info *info)
{
	CGS_FUNC_ADEV;

	if (CGS_UCODE_ID_SMU != type) {
		uint64_t gpu_addr;
		uint32_t data_size;
		const struct gfx_firmware_header_v1_0 *header;
		enum AMDGPU_UCODE_ID id;
		struct amdgpu_firmware_info *ucode;

		id = fw_type_convert(cgs_device, type);
		ucode = &adev->firmware.ucode[id];
		if (ucode->fw == NULL)	/* firmware not loaded (or id unmapped) */
			return -EINVAL;

		gpu_addr = ucode->mc_addr;
		header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
		data_size = le32_to_cpu(header->header.ucode_size_bytes);

		/* the MEC jump tables are a dword-offset sub-range of the
		 * MEC image, not a standalone blob */
		if ((type == CGS_UCODE_ID_CP_MEC_JT1) ||
		    (type == CGS_UCODE_ID_CP_MEC_JT2)) {
			gpu_addr += le32_to_cpu(header->jt_offset) << 2;
			data_size = le32_to_cpu(header->jt_size) << 2;
		}
		info->mc_addr = gpu_addr;
		info->image_size = data_size;
		/* versions are truncated to 16 bits for the CGS interface */
		info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
		info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
	} else {
		char fw_name[30] = {0};
		int err = 0;
		uint32_t ucode_size;
		uint32_t ucode_start_address;
		const uint8_t *src;
		const struct smc_firmware_header_v1_0 *hdr;

		switch (adev->asic_type) {
		case CHIP_TONGA:
			strcpy(fw_name, "amdgpu/tonga_smc.bin");
			break;
		default:
			DRM_ERROR("SMC firmware not supported\n");
			return -EINVAL;
		}

		/* NOTE(review): adev->pm.fw is overwritten unconditionally;
		 * a previously loaded blob would be leaked - confirm this
		 * path only runs once per device */
		err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
		if (err) {
			DRM_ERROR("Failed to request firmware\n");
			return err;
		}

		err = amdgpu_ucode_validate(adev->pm.fw);
		if (err) {
			DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
			release_firmware(adev->pm.fw);
			adev->pm.fw = NULL;
			return err;
		}

		hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
		adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
		ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
		/* NOTE(review): ucode_start_address is computed but never
		 * reported through *info - confirm whether CGS needs it */
		ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
		src = (const uint8_t *)(adev->pm.fw->data +
		       le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		info->version = adev->pm.fw_version;
		info->image_size = ucode_size;
		info->kptr = (void *)src;
	}
	return 0;
}
732 | ||
d03846af CZ |
/*
 * CGS callback table handed to common code.  The initializers are
 * positional, so the order here must match the member order of
 * struct cgs_ops (declared in the CGS interface header) exactly.
 */
static const struct cgs_ops amdgpu_cgs_ops = {
	amdgpu_cgs_gpu_mem_info,
	amdgpu_cgs_gmap_kmem,
	amdgpu_cgs_gunmap_kmem,
	amdgpu_cgs_alloc_gpu_mem,
	amdgpu_cgs_free_gpu_mem,
	amdgpu_cgs_gmap_gpu_mem,
	amdgpu_cgs_gunmap_gpu_mem,
	amdgpu_cgs_kmap_gpu_mem,
	amdgpu_cgs_kunmap_gpu_mem,
	amdgpu_cgs_read_register,
	amdgpu_cgs_write_register,
	amdgpu_cgs_read_ind_register,
	amdgpu_cgs_write_ind_register,
	amdgpu_cgs_read_pci_config_byte,
	amdgpu_cgs_read_pci_config_word,
	amdgpu_cgs_read_pci_config_dword,
	amdgpu_cgs_write_pci_config_byte,
	amdgpu_cgs_write_pci_config_word,
	amdgpu_cgs_write_pci_config_dword,
	amdgpu_cgs_atom_get_data_table,
	amdgpu_cgs_atom_get_cmd_table_revs,
	amdgpu_cgs_atom_exec_cmd_table,
	amdgpu_cgs_create_pm_request,
	amdgpu_cgs_destroy_pm_request,
	amdgpu_cgs_set_pm_request,
	amdgpu_cgs_pm_request_clock,
	amdgpu_cgs_pm_request_engine,
	amdgpu_cgs_pm_query_clock_limits,
	amdgpu_cgs_set_camera_voltages,
	amdgpu_cgs_get_firmware_info
};
765 | ||
/*
 * OS-specific CGS callbacks (memory import, IRQ plumbing).  Positional
 * initializers: order must match struct cgs_os_ops exactly.
 */
static const struct cgs_os_ops amdgpu_cgs_os_ops = {
	amdgpu_cgs_import_gpu_mem,
	amdgpu_cgs_add_irq_source,
	amdgpu_cgs_irq_get,
	amdgpu_cgs_irq_put
};
772 | ||
773 | void *amdgpu_cgs_create_device(struct amdgpu_device *adev) | |
774 | { | |
775 | struct amdgpu_cgs_device *cgs_device = | |
776 | kmalloc(sizeof(*cgs_device), GFP_KERNEL); | |
777 | ||
778 | if (!cgs_device) { | |
779 | DRM_ERROR("Couldn't allocate CGS device structure\n"); | |
780 | return NULL; | |
781 | } | |
782 | ||
783 | cgs_device->base.ops = &amdgpu_cgs_ops; | |
784 | cgs_device->base.os_ops = &amdgpu_cgs_os_ops; | |
785 | cgs_device->adev = adev; | |
786 | ||
787 | return cgs_device; | |
788 | } | |
789 | ||
/* Free a CGS device created by amdgpu_cgs_create_device().  The adev it
 * references is owned elsewhere and is not touched. */
void amdgpu_cgs_destroy_device(void *cgs_device)
{
	kfree(cgs_device);
}