/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <drm/drmP.h>
#include <linux/firmware.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "cgs_linux.h"
#include "atom.h"
#include "amdgpu_ucode.h"
struct amdgpu_cgs_device {
	struct cgs_device base;
	struct amdgpu_device *adev;
};
#define CGS_FUNC_ADEV							\
	struct amdgpu_device *adev =					\
		((struct amdgpu_cgs_device *)cgs_device)->adev
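
/*
 * Report the MC address range and the amount of it still available for a
 * given CGS memory pool (CPU-visible/invisible VRAM or GART).
 */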
static int amdgpu_cgs_gpu_mem_info(struct cgs_device *cgs_device, enum cgs_gpu_mem_type type,
				   uint64_t *mc_start, uint64_t *mc_size,
				   uint64_t *mem_size)
{
	CGS_FUNC_ADEV;

	switch (type) {
	case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__VISIBLE_FB:
		*mc_start = 0;
		*mc_size = adev->mc.visible_vram_size;
		*mem_size = adev->mc.visible_vram_size - adev->vram_pin_size;
		break;
	case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
		*mc_start = adev->mc.visible_vram_size;
		*mc_size = adev->mc.real_vram_size - adev->mc.visible_vram_size;
		*mem_size = *mc_size;
		break;
	case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
	case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
		*mc_start = adev->mc.gtt_start;
		*mc_size = adev->mc.gtt_size;
		*mem_size = adev->mc.gtt_size - adev->gart_pin_size;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
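
/*
 * Wrap a vmalloc'ed kernel buffer in a GTT buffer object and pin it, so the
 * caller gets back a handle plus the GPU (MC) address of the mapping.
 */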
static int amdgpu_cgs_gmap_kmem(struct cgs_device *cgs_device, void *kmem,
				uint64_t size,
				uint64_t min_offset, uint64_t max_offset,
				cgs_handle_t *kmem_handle, uint64_t *mcaddr)
{
	CGS_FUNC_ADEV;
	int ret;
	struct amdgpu_bo *bo;
	struct page *kmem_page = vmalloc_to_page(kmem);
	int npages = ALIGN(size, PAGE_SIZE) >> PAGE_SHIFT;

	struct sg_table *sg = drm_prime_pages_to_sg(&kmem_page, npages);
	ret = amdgpu_bo_create(adev, size, PAGE_SIZE, false,
			       AMDGPU_GEM_DOMAIN_GTT, 0, sg, NULL, &bo);
	if (ret)
		return ret;
	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return ret;

	/* pin buffer into GTT */
	ret = amdgpu_bo_pin_restricted(bo, AMDGPU_GEM_DOMAIN_GTT,
				       min_offset, max_offset, mcaddr);
	amdgpu_bo_unreserve(bo);

	*kmem_handle = (cgs_handle_t)bo;
	return ret;
}
static int amdgpu_cgs_gunmap_kmem(struct cgs_device *cgs_device, cgs_handle_t kmem_handle)
{
	struct amdgpu_bo *obj = (struct amdgpu_bo *)kmem_handle;

	if (obj) {
		int r = amdgpu_bo_reserve(obj, false);
		if (likely(r == 0)) {
			amdgpu_bo_unpin(obj);
			amdgpu_bo_unreserve(obj);
		}
		amdgpu_bo_unref(&obj);
	}
	return 0;
}
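
/*
 * Allocate a buffer object for the requested CGS memory type: the type
 * selects an amdgpu domain and a restricted TTM placement window bounded
 * by min_offset/max_offset.
 */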
static int amdgpu_cgs_alloc_gpu_mem(struct cgs_device *cgs_device,
				    enum cgs_gpu_mem_type type,
				    uint64_t size, uint64_t align,
				    uint64_t min_offset, uint64_t max_offset,
				    cgs_handle_t *handle)
{
	CGS_FUNC_ADEV;
	uint16_t flags = 0;
	int ret = 0;
	uint32_t domain = 0;
	struct amdgpu_bo *obj;
	struct ttm_placement placement;
	struct ttm_place place;

	if (min_offset > max_offset) {
		BUG_ON(1);
		return -EINVAL;
	}

	/* fail if the alignment is not a power of 2 */
	if (((align != 1) && (align & (align - 1)))
	    || size == 0 || align == 0)
		return -EINVAL;

	switch (type) {
	case CGS_GPU_MEM_TYPE__VISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__VISIBLE_FB:
		flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (max_offset > adev->mc.real_vram_size)
			return -EINVAL;
		place.fpfn = min_offset >> PAGE_SHIFT;
		place.lpfn = max_offset >> PAGE_SHIFT;
		place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
			TTM_PL_FLAG_VRAM;
		break;
	case CGS_GPU_MEM_TYPE__INVISIBLE_CONTIG_FB:
	case CGS_GPU_MEM_TYPE__INVISIBLE_FB:
		flags = AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
		domain = AMDGPU_GEM_DOMAIN_VRAM;
		if (adev->mc.visible_vram_size < adev->mc.real_vram_size) {
			place.fpfn =
				max(min_offset, adev->mc.visible_vram_size) >> PAGE_SHIFT;
			place.lpfn =
				min(max_offset, adev->mc.real_vram_size) >> PAGE_SHIFT;
			place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_VRAM;
		}
		break;
	case CGS_GPU_MEM_TYPE__GART_CACHEABLE:
		domain = AMDGPU_GEM_DOMAIN_GTT;
		place.fpfn = min_offset >> PAGE_SHIFT;
		place.lpfn = max_offset >> PAGE_SHIFT;
		place.flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
		break;
	case CGS_GPU_MEM_TYPE__GART_WRITECOMBINE:
		flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC;
		domain = AMDGPU_GEM_DOMAIN_GTT;
		place.fpfn = min_offset >> PAGE_SHIFT;
		place.lpfn = max_offset >> PAGE_SHIFT;
		place.flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT |
			TTM_PL_FLAG_UNCACHED;
		break;
	default:
		return -EINVAL;
	}

	placement.placement = &place;
	placement.num_placement = 1;
	placement.busy_placement = &place;
	placement.num_busy_placement = 1;

	ret = amdgpu_bo_create_restricted(adev, size, PAGE_SIZE,
					  true, domain, flags,
					  NULL, &placement, NULL,
					  &obj);
	if (ret) {
		DRM_ERROR("(%d) bo create failed\n", ret);
		return ret;
	}
	*handle = (cgs_handle_t)obj;

	return ret;
}
static int amdgpu_cgs_free_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
{
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	if (obj) {
		int r = amdgpu_bo_reserve(obj, false);
		if (likely(r == 0)) {
			amdgpu_bo_kunmap(obj);
			amdgpu_bo_unpin(obj);
			amdgpu_bo_unreserve(obj);
		}
		amdgpu_bo_unref(&obj);
	}
	return 0;
}
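
/* Pin/unpin and CPU map/unmap helpers that operate on a CGS buffer handle. */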
static int amdgpu_cgs_gmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle,
				   uint64_t *mcaddr)
{
	int r;
	u64 min_offset, max_offset;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	WARN_ON_ONCE(obj->placement.num_placement > 1);

	min_offset = obj->placements[0].fpfn << PAGE_SHIFT;
	max_offset = obj->placements[0].lpfn << PAGE_SHIFT;

	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_pin_restricted(obj, AMDGPU_GEM_DOMAIN_GTT,
				     min_offset, max_offset, mcaddr);
	amdgpu_bo_unreserve(obj);
	return r;
}
static int amdgpu_cgs_gunmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_unpin(obj);
	amdgpu_bo_unreserve(obj);
	return r;
}
static int amdgpu_cgs_kmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle,
				   void **map)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	r = amdgpu_bo_kmap(obj, map);
	amdgpu_bo_unreserve(obj);
	return r;
}
static int amdgpu_cgs_kunmap_gpu_mem(struct cgs_device *cgs_device, cgs_handle_t handle)
{
	int r;
	struct amdgpu_bo *obj = (struct amdgpu_bo *)handle;

	r = amdgpu_bo_reserve(obj, false);
	if (unlikely(r != 0))
		return r;
	amdgpu_bo_kunmap(obj);
	amdgpu_bo_unreserve(obj);
	return r;
}
static uint32_t amdgpu_cgs_read_register(struct cgs_device *cgs_device, unsigned offset)
{
	CGS_FUNC_ADEV;
	return RREG32(offset);
}
static void amdgpu_cgs_write_register(struct cgs_device *cgs_device, unsigned offset,
				      uint32_t value)
{
	CGS_FUNC_ADEV;
	WREG32(offset, value);
}
static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device,
					     enum cgs_ind_reg space,
					     unsigned index)
{
	CGS_FUNC_ADEV;
	switch (space) {
	case CGS_IND_REG__MMIO:
		return RREG32_IDX(index);
	case CGS_IND_REG__PCIE:
		return RREG32_PCIE(index);
	case CGS_IND_REG__SMC:
		return RREG32_SMC(index);
	case CGS_IND_REG__UVD_CTX:
		return RREG32_UVD_CTX(index);
	case CGS_IND_REG__DIDT:
		return RREG32_DIDT(index);
	case CGS_IND_REG_GC_CAC:
		return RREG32_GC_CAC(index);
	case CGS_IND_REG__AUDIO_ENDPT:
		DRM_ERROR("audio endpt register access not implemented.\n");
		return 0;
	}
	WARN(1, "Invalid indirect register space");
	return 0;
}
static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device,
					  enum cgs_ind_reg space,
					  unsigned index, uint32_t value)
{
	CGS_FUNC_ADEV;
	switch (space) {
	case CGS_IND_REG__MMIO:
		return WREG32_IDX(index, value);
	case CGS_IND_REG__PCIE:
		return WREG32_PCIE(index, value);
	case CGS_IND_REG__SMC:
		return WREG32_SMC(index, value);
	case CGS_IND_REG__UVD_CTX:
		return WREG32_UVD_CTX(index, value);
	case CGS_IND_REG__DIDT:
		return WREG32_DIDT(index, value);
	case CGS_IND_REG_GC_CAC:
		return WREG32_GC_CAC(index, value);
	case CGS_IND_REG__AUDIO_ENDPT:
		DRM_ERROR("audio endpt register access not implemented.\n");
		return;
	}
	WARN(1, "Invalid indirect register space");
}
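
/* PCI configuration space accessors; failures are reported via WARN(). */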
static uint8_t amdgpu_cgs_read_pci_config_byte(struct cgs_device *cgs_device, unsigned addr)
{
	CGS_FUNC_ADEV;
	uint8_t val;
	int ret = pci_read_config_byte(adev->pdev, addr, &val);
	if (WARN(ret, "pci_read_config_byte error"))
		return 0;
	return val;
}
static uint16_t amdgpu_cgs_read_pci_config_word(struct cgs_device *cgs_device, unsigned addr)
{
	CGS_FUNC_ADEV;
	uint16_t val;
	int ret = pci_read_config_word(adev->pdev, addr, &val);
	if (WARN(ret, "pci_read_config_word error"))
		return 0;
	return val;
}
static uint32_t amdgpu_cgs_read_pci_config_dword(struct cgs_device *cgs_device,
						 unsigned addr)
{
	CGS_FUNC_ADEV;
	uint32_t val;
	int ret = pci_read_config_dword(adev->pdev, addr, &val);
	if (WARN(ret, "pci_read_config_dword error"))
		return 0;
	return val;
}
static void amdgpu_cgs_write_pci_config_byte(struct cgs_device *cgs_device, unsigned addr,
					     uint8_t value)
{
	CGS_FUNC_ADEV;
	int ret = pci_write_config_byte(adev->pdev, addr, value);
	WARN(ret, "pci_write_config_byte error");
}
static void amdgpu_cgs_write_pci_config_word(struct cgs_device *cgs_device, unsigned addr,
					     uint16_t value)
{
	CGS_FUNC_ADEV;
	int ret = pci_write_config_word(adev->pdev, addr, value);
	WARN(ret, "pci_write_config_word error");
}
static void amdgpu_cgs_write_pci_config_dword(struct cgs_device *cgs_device, unsigned addr,
					      uint32_t value)
{
	CGS_FUNC_ADEV;
	int ret = pci_write_config_dword(adev->pdev, addr, value);
	WARN(ret, "pci_write_config_dword error");
}
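
/*
 * Return the base address of a PCI resource (register MMIO BAR or the
 * doorbell aperture) after checking that offset + size fits inside it.
 */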
static int amdgpu_cgs_get_pci_resource(struct cgs_device *cgs_device,
				       enum cgs_resource_type resource_type,
				       uint64_t size,
				       uint64_t offset,
				       uint64_t *resource_base)
{
	CGS_FUNC_ADEV;

	if (resource_base == NULL)
		return -EINVAL;

	switch (resource_type) {
	case CGS_RESOURCE_TYPE_MMIO:
		if (adev->rmmio_size == 0)
			return -ENOENT;
		if ((offset + size) > adev->rmmio_size)
			return -EINVAL;
		*resource_base = adev->rmmio_base;
		return 0;
	case CGS_RESOURCE_TYPE_DOORBELL:
		if (adev->doorbell.size == 0)
			return -ENOENT;
		if ((offset + size) > adev->doorbell.size)
			return -EINVAL;
		*resource_base = adev->doorbell.base;
		return 0;
	case CGS_RESOURCE_TYPE_FB:
	case CGS_RESOURCE_TYPE_IO:
	case CGS_RESOURCE_TYPE_ROM:
	default:
		return -EINVAL;
	}
}
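
/* Thin wrappers around the ATOM BIOS parser for data and command tables. */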
static const void *amdgpu_cgs_atom_get_data_table(struct cgs_device *cgs_device,
						  unsigned table, uint16_t *size,
						  uint8_t *frev, uint8_t *crev)
{
	CGS_FUNC_ADEV;
	uint16_t data_start;

	if (amdgpu_atom_parse_data_header(
		    adev->mode_info.atom_context, table, size,
		    frev, crev, &data_start))
		return (uint8_t*)adev->mode_info.atom_context->bios +
			data_start;

	return NULL;
}
static int amdgpu_cgs_atom_get_cmd_table_revs(struct cgs_device *cgs_device, unsigned table,
					      uint8_t *frev, uint8_t *crev)
{
	CGS_FUNC_ADEV;

	if (amdgpu_atom_parse_cmd_header(
		    adev->mode_info.atom_context, table,
		    frev, crev))
		return 0;

	return -EINVAL;
}
static int amdgpu_cgs_atom_exec_cmd_table(struct cgs_device *cgs_device, unsigned table,
					  void *args)
{
	CGS_FUNC_ADEV;

	return amdgpu_atom_execute_table(
		adev->mode_info.atom_context, table, args);
}
static int amdgpu_cgs_create_pm_request(struct cgs_device *cgs_device, cgs_handle_t *request)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_destroy_pm_request(struct cgs_device *cgs_device, cgs_handle_t request)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_set_pm_request(struct cgs_device *cgs_device, cgs_handle_t request,
				     int active)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_pm_request_clock(struct cgs_device *cgs_device, cgs_handle_t request,
				       enum cgs_clock clock, unsigned freq)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_pm_request_engine(struct cgs_device *cgs_device, cgs_handle_t request,
					enum cgs_engine engine, int powered)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_pm_query_clock_limits(struct cgs_device *cgs_device,
					    enum cgs_clock clock,
					    struct cgs_clock_limits *limits)
{
	/* TODO */
	return 0;
}

static int amdgpu_cgs_set_camera_voltages(struct cgs_device *cgs_device, uint32_t mask,
					  const uint32_t *voltages)
{
	DRM_ERROR("not implemented");
	return -EPERM;
}
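
/*
 * IRQ glue: a cgs_irq_params block is attached to an amdgpu_irq_src so that
 * cgs_set_irq_state()/cgs_process_irq() can forward interrupt state changes
 * and interrupt events to the client's set/handler callbacks.
 */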
struct cgs_irq_params {
	unsigned src_id;
	cgs_irq_source_set_func_t set;
	cgs_irq_handler_func_t handler;
	void *private_data;
};
static int cgs_set_irq_state(struct amdgpu_device *adev,
			     struct amdgpu_irq_src *src,
			     unsigned type,
			     enum amdgpu_interrupt_state state)
{
	struct cgs_irq_params *irq_params =
		(struct cgs_irq_params *)src->data;

	if (!irq_params)
		return -EINVAL;
	if (!irq_params->set)
		return -EINVAL;
	return irq_params->set(irq_params->private_data,
			       irq_params->src_id,
			       type,
			       (int)state);
}
static int cgs_process_irq(struct amdgpu_device *adev,
			   struct amdgpu_irq_src *source,
			   struct amdgpu_iv_entry *entry)
{
	struct cgs_irq_params *irq_params =
		(struct cgs_irq_params *)source->data;

	if (!irq_params)
		return -EINVAL;
	if (!irq_params->handler)
		return -EINVAL;
	return irq_params->handler(irq_params->private_data,
				   irq_params->src_id,
				   entry->iv_entry);
}
static const struct amdgpu_irq_src_funcs cgs_irq_funcs = {
	.set = cgs_set_irq_state,
	.process = cgs_process_irq,
};
static int amdgpu_cgs_add_irq_source(struct cgs_device *cgs_device, unsigned src_id,
				     unsigned num_types,
				     cgs_irq_source_set_func_t set,
				     cgs_irq_handler_func_t handler,
				     void *private_data)
{
	CGS_FUNC_ADEV;
	int ret = 0;
	struct cgs_irq_params *irq_params;
	struct amdgpu_irq_src *source =
		kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);
	if (!source)
		return -ENOMEM;
	irq_params =
		kzalloc(sizeof(struct cgs_irq_params), GFP_KERNEL);
	if (!irq_params) {
		kfree(source);
		return -ENOMEM;
	}
	source->num_types = num_types;
	source->funcs = &cgs_irq_funcs;
	irq_params->src_id = src_id;
	irq_params->set = set;
	irq_params->handler = handler;
	irq_params->private_data = private_data;
	source->data = (void *)irq_params;
	ret = amdgpu_irq_add_id(adev, src_id, source);

	return ret;
}
static int amdgpu_cgs_irq_get(struct cgs_device *cgs_device, unsigned src_id, unsigned type)
{
	CGS_FUNC_ADEV;
	return amdgpu_irq_get(adev, adev->irq.sources[src_id], type);
}
static int amdgpu_cgs_irq_put(struct cgs_device *cgs_device, unsigned src_id, unsigned type)
{
	CGS_FUNC_ADEV;
	return amdgpu_irq_put(adev, adev->irq.sources[src_id], type);
}
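
/* Walk the IP blocks and forward the gating request to the matching one. */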
int amdgpu_cgs_set_clockgating_state(struct cgs_device *cgs_device,
				     enum amd_ip_block_type block_type,
				     enum amd_clockgating_state state)
{
	CGS_FUNC_ADEV;
	int i, r = -1;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_block_status[i].valid)
			continue;

		if (adev->ip_blocks[i].type == block_type) {
			r = adev->ip_blocks[i].funcs->set_clockgating_state(
								(void *)adev,
								state);
			break;
		}
	}
	return r;
}
int amdgpu_cgs_set_powergating_state(struct cgs_device *cgs_device,
				     enum amd_ip_block_type block_type,
				     enum amd_powergating_state state)
{
	CGS_FUNC_ADEV;
	int i, r = -1;

	for (i = 0; i < adev->num_ip_blocks; i++) {
		if (!adev->ip_block_status[i].valid)
			continue;

		if (adev->ip_blocks[i].type == block_type) {
			r = adev->ip_blocks[i].funcs->set_powergating_state(
								(void *)adev,
								state);
			break;
		}
	}
	return r;
}
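
/* Map a CGS firmware id onto the corresponding AMDGPU_UCODE_ID. */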
static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type)
{
	CGS_FUNC_ADEV;
	enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM;

	switch (fw_type) {
	case CGS_UCODE_ID_SDMA0:
		result = AMDGPU_UCODE_ID_SDMA0;
		break;
	case CGS_UCODE_ID_SDMA1:
		result = AMDGPU_UCODE_ID_SDMA1;
		break;
	case CGS_UCODE_ID_CP_CE:
		result = AMDGPU_UCODE_ID_CP_CE;
		break;
	case CGS_UCODE_ID_CP_PFP:
		result = AMDGPU_UCODE_ID_CP_PFP;
		break;
	case CGS_UCODE_ID_CP_ME:
		result = AMDGPU_UCODE_ID_CP_ME;
		break;
	case CGS_UCODE_ID_CP_MEC:
	case CGS_UCODE_ID_CP_MEC_JT1:
		result = AMDGPU_UCODE_ID_CP_MEC1;
		break;
	case CGS_UCODE_ID_CP_MEC_JT2:
		if (adev->asic_type == CHIP_TONGA || adev->asic_type == CHIP_POLARIS11
		    || adev->asic_type == CHIP_POLARIS10)
			result = AMDGPU_UCODE_ID_CP_MEC2;
		else
			result = AMDGPU_UCODE_ID_CP_MEC1;
		break;
	case CGS_UCODE_ID_RLC_G:
		result = AMDGPU_UCODE_ID_RLC_G;
		break;
	default:
		DRM_ERROR("Firmware type not supported\n");
	}
	return result;
}
static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode_id type)
{
	CGS_FUNC_ADEV;
	if ((CGS_UCODE_ID_SMU == type) || (CGS_UCODE_ID_SMU_SK == type)) {
		release_firmware(adev->pm.fw);
		adev->pm.fw = NULL;
		return 0;
	}
	/* cannot release other firmware because they are not created by cgs */
	return -EINVAL;
}
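
/*
 * For non-SMU firmware, return address/size info for an already loaded
 * ucode image; for SMU firmware, request and validate the per-ASIC SMC
 * binary first and then describe it.
 */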
static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
					enum cgs_ucode_id type,
					struct cgs_firmware_info *info)
{
	CGS_FUNC_ADEV;

	if ((CGS_UCODE_ID_SMU != type) && (CGS_UCODE_ID_SMU_SK != type)) {
		uint64_t gpu_addr;
		uint32_t data_size;
		const struct gfx_firmware_header_v1_0 *header;
		enum AMDGPU_UCODE_ID id;
		struct amdgpu_firmware_info *ucode;

		id = fw_type_convert(cgs_device, type);
		ucode = &adev->firmware.ucode[id];
		if (ucode->fw == NULL)
			return -EINVAL;

		gpu_addr = ucode->mc_addr;
		header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
		data_size = le32_to_cpu(header->header.ucode_size_bytes);

		if ((type == CGS_UCODE_ID_CP_MEC_JT1) ||
		    (type == CGS_UCODE_ID_CP_MEC_JT2)) {
			gpu_addr += le32_to_cpu(header->jt_offset) << 2;
			data_size = le32_to_cpu(header->jt_size) << 2;
		}
		info->mc_addr = gpu_addr;
		info->image_size = data_size;
		info->version = (uint16_t)le32_to_cpu(header->header.ucode_version);
		info->feature_version = (uint16_t)le32_to_cpu(header->ucode_feature_version);
	} else {
		char fw_name[30] = {0};
		int err = 0;
		uint32_t ucode_size;
		uint32_t ucode_start_address;
		const uint8_t *src;
		const struct smc_firmware_header_v1_0 *hdr;

		switch (adev->asic_type) {
		case CHIP_TOPAZ:
			strcpy(fw_name, "amdgpu/topaz_smc.bin");
			break;
		case CHIP_TONGA:
			strcpy(fw_name, "amdgpu/tonga_smc.bin");
			break;
		case CHIP_FIJI:
			strcpy(fw_name, "amdgpu/fiji_smc.bin");
			break;
		case CHIP_POLARIS11:
			if (type == CGS_UCODE_ID_SMU)
				strcpy(fw_name, "amdgpu/polaris11_smc.bin");
			else if (type == CGS_UCODE_ID_SMU_SK)
				strcpy(fw_name, "amdgpu/polaris11_smc_sk.bin");
			break;
		case CHIP_POLARIS10:
			if (type == CGS_UCODE_ID_SMU)
				strcpy(fw_name, "amdgpu/polaris10_smc.bin");
			else if (type == CGS_UCODE_ID_SMU_SK)
				strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
			break;
		default:
			DRM_ERROR("SMC firmware not supported\n");
			return -EINVAL;
		}

		err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
		if (err) {
			DRM_ERROR("Failed to request firmware\n");
			return err;
		}

		err = amdgpu_ucode_validate(adev->pm.fw);
		if (err) {
			DRM_ERROR("Failed to load firmware \"%s\"", fw_name);
			release_firmware(adev->pm.fw);
			adev->pm.fw = NULL;
			return err;
		}

		hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data;
		amdgpu_ucode_print_smc_hdr(&hdr->header);
		adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
		ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
		ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
		src = (const uint8_t *)(adev->pm.fw->data +
		      le32_to_cpu(hdr->header.ucode_array_offset_bytes));

		info->version = adev->pm.fw_version;
		info->image_size = ucode_size;
		info->ucode_start_address = ucode_start_address;
		info->kptr = (void *)src;
	}
	return 0;
}
static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
					struct cgs_system_info *sys_info)
{
	CGS_FUNC_ADEV;

	if (NULL == sys_info)
		return -ENODEV;

	if (sizeof(struct cgs_system_info) != sys_info->size)
		return -ENODEV;

	switch (sys_info->info_id) {
	case CGS_SYSTEM_INFO_ADAPTER_BDF_ID:
		sys_info->value = adev->pdev->devfn | (adev->pdev->bus->number << 8);
		break;
	case CGS_SYSTEM_INFO_PCIE_GEN_INFO:
		sys_info->value = adev->pm.pcie_gen_mask;
		break;
	case CGS_SYSTEM_INFO_PCIE_MLW:
		sys_info->value = adev->pm.pcie_mlw_mask;
		break;
	case CGS_SYSTEM_INFO_PCIE_DEV:
		sys_info->value = adev->pdev->device;
		break;
	case CGS_SYSTEM_INFO_PCIE_REV:
		sys_info->value = adev->pdev->revision;
		break;
	case CGS_SYSTEM_INFO_CG_FLAGS:
		sys_info->value = adev->cg_flags;
		break;
	case CGS_SYSTEM_INFO_PG_FLAGS:
		sys_info->value = adev->pg_flags;
		break;
	case CGS_SYSTEM_INFO_GFX_CU_INFO:
		sys_info->value = adev->gfx.cu_info.number;
		break;
	case CGS_SYSTEM_INFO_GFX_SE_INFO:
		sys_info->value = adev->gfx.config.max_shader_engines;
		break;
	default:
		return -ENODEV;
	}

	return 0;
}
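
/*
 * Build a mask of the enabled CRTCs and, for an enabled CRTC with a valid
 * mode, report vblank time, refresh rate and the SPLL reference clock.
 */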
static int amdgpu_cgs_get_active_displays_info(struct cgs_device *cgs_device,
					       struct cgs_display_info *info)
{
	CGS_FUNC_ADEV;
	struct amdgpu_crtc *amdgpu_crtc;
	struct drm_device *ddev = adev->ddev;
	struct drm_crtc *crtc;
	uint32_t line_time_us, vblank_lines;
	struct cgs_mode_info *mode_info;

	if (info == NULL)
		return -EINVAL;

	mode_info = info->mode_info;

	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc,
				    &ddev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (crtc->enabled) {
				info->active_display_mask |= (1 << amdgpu_crtc->crtc_id);
				info->display_count++;
			}
			if (mode_info != NULL &&
			    crtc->enabled && amdgpu_crtc->enabled &&
			    amdgpu_crtc->hw_mode.clock) {
				line_time_us = (amdgpu_crtc->hw_mode.crtc_htotal * 1000) /
					amdgpu_crtc->hw_mode.clock;
				vblank_lines = amdgpu_crtc->hw_mode.crtc_vblank_end -
					amdgpu_crtc->hw_mode.crtc_vdisplay +
					(amdgpu_crtc->v_border * 2);
				mode_info->vblank_time_us = vblank_lines * line_time_us;
				mode_info->refresh_rate = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
				mode_info->ref_clock = adev->clock.spll.reference_freq;
				mode_info = NULL;
			}
		}
	}

	return 0;
}
static int amdgpu_cgs_notify_dpm_enabled(struct cgs_device *cgs_device, bool enabled)
{
	CGS_FUNC_ADEV;

	adev->pm.dpm_enabled = enabled;

	return 0;
}
/** \brief evaluate acpi namespace object, handle or pathname must be valid
 *  \param info input/output arguments for the control method
 */
#if defined(CONFIG_ACPI)
static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
				       struct cgs_acpi_method_info *info)
{
	CGS_FUNC_ADEV;
	acpi_handle handle;
	struct acpi_object_list input;
	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *params, *obj;
	uint8_t name[5] = {'\0'};
	struct cgs_acpi_method_argument *argument;
	uint32_t i, count;
	acpi_status status;
	int result;

	handle = ACPI_HANDLE(&adev->pdev->dev);
	if (!handle)
		return -ENODEV;

	memset(&input, 0, sizeof(struct acpi_object_list));

	/* validate input info */
	if (info->size != sizeof(struct cgs_acpi_method_info))
		return -EINVAL;

	input.count = info->input_count;
	if (info->input_count > 0) {
		if (info->pinput_argument == NULL)
			return -EINVAL;
		argument = info->pinput_argument;
		for (i = 0; i < info->input_count; i++) {
			if (((argument->type == ACPI_TYPE_STRING) ||
			     (argument->type == ACPI_TYPE_BUFFER)) &&
			    (argument->pointer == NULL))
				return -EINVAL;
			argument++;
		}
	}

	if (info->output_count > 0) {
		if (info->poutput_argument == NULL)
			return -EINVAL;
		argument = info->poutput_argument;
		for (i = 0; i < info->output_count; i++) {
			if (((argument->type == ACPI_TYPE_STRING) ||
			     (argument->type == ACPI_TYPE_BUFFER))
			    && (argument->pointer == NULL))
				return -EINVAL;
			argument++;
		}
	}

	/* The path name passed to acpi_evaluate_object should be null terminated */
	if ((info->field & CGS_ACPI_FIELD_METHOD_NAME) != 0) {
		strncpy(name, (char *)&(info->name), sizeof(uint32_t));
		name[4] = '\0';
	}

	/* parse input parameters */
	if (input.count > 0) {
		input.pointer = params =
			kzalloc(sizeof(union acpi_object) * input.count, GFP_KERNEL);
		if (params == NULL)
			return -EINVAL;

		argument = info->pinput_argument;

		for (i = 0; i < input.count; i++) {
			params->type = argument->type;
			switch (params->type) {
			case ACPI_TYPE_INTEGER:
				params->integer.value = argument->value;
				break;
			case ACPI_TYPE_STRING:
				params->string.length = argument->data_length;
				params->string.pointer = argument->pointer;
				break;
			case ACPI_TYPE_BUFFER:
				params->buffer.length = argument->data_length;
				params->buffer.pointer = argument->pointer;
				break;
			default:
				break;
			}
			params++;
			argument++;
		}
	}

	/* parse output info */
	count = info->output_count;
	argument = info->poutput_argument;

	/* evaluate the acpi method */
	status = acpi_evaluate_object(handle, name, &input, &output);

	if (ACPI_FAILURE(status)) {
		result = -EIO;
		goto free_input;
	}

	/* return the output info */
	obj = output.pointer;

	if (count > 1) {
		if ((obj->type != ACPI_TYPE_PACKAGE) ||
		    (obj->package.count != count)) {
			result = -EIO;
			goto free_obj;
		}
		params = obj->package.elements;
	} else
		params = obj;

	if (params == NULL) {
		result = -EIO;
		goto free_obj;
	}

	for (i = 0; i < count; i++) {
		if (argument->type != params->type) {
			result = -EIO;
			goto free_obj;
		}
		switch (params->type) {
		case ACPI_TYPE_INTEGER:
			argument->value = params->integer.value;
			break;
		case ACPI_TYPE_STRING:
			if ((params->string.length != argument->data_length) ||
			    (params->string.pointer == NULL)) {
				result = -EIO;
				goto free_obj;
			}
			strncpy(argument->pointer,
				params->string.pointer,
				params->string.length);
			break;
		case ACPI_TYPE_BUFFER:
			if (params->buffer.pointer == NULL) {
				result = -EIO;
				goto free_obj;
			}
			memcpy(argument->pointer,
			       params->buffer.pointer,
			       argument->data_length);
			break;
		default:
			break;
		}
		argument++;
		params++;
	}

	result = 0;
free_obj:
	kfree(obj);
free_input:
	kfree((void *)input.pointer);
	return result;
}
#else
static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
				       struct cgs_acpi_method_info *info)
{
	return -EIO;
}
#endif
static int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device,
				       uint32_t acpi_method,
				       uint32_t acpi_function,
				       void *pinput, void *poutput,
				       uint32_t output_count,
				       uint32_t input_size,
				       uint32_t output_size)
{
	struct cgs_acpi_method_argument acpi_input[2] = { {0}, {0} };
	struct cgs_acpi_method_argument acpi_output = {0};
	struct cgs_acpi_method_info info = {0};

	acpi_input[0].type = CGS_ACPI_TYPE_INTEGER;
	acpi_input[0].data_length = sizeof(uint32_t);
	acpi_input[0].value = acpi_function;

	acpi_input[1].type = CGS_ACPI_TYPE_BUFFER;
	acpi_input[1].data_length = input_size;
	acpi_input[1].pointer = pinput;

	acpi_output.type = CGS_ACPI_TYPE_BUFFER;
	acpi_output.data_length = output_size;
	acpi_output.pointer = poutput;

	info.size = sizeof(struct cgs_acpi_method_info);
	info.field = CGS_ACPI_FIELD_METHOD_NAME | CGS_ACPI_FIELD_INPUT_ARGUMENT_COUNT;
	info.input_count = 2;
	info.name = acpi_method;
	info.pinput_argument = acpi_input;
	info.output_count = output_count;
	info.poutput_argument = &acpi_output;

	return amdgpu_cgs_acpi_eval_object(cgs_device, &info);
}
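
/* Dispatch tables exposed to CGS clients through the cgs_device base. */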
static const struct cgs_ops amdgpu_cgs_ops = {
	amdgpu_cgs_gpu_mem_info,
	amdgpu_cgs_gmap_kmem,
	amdgpu_cgs_gunmap_kmem,
	amdgpu_cgs_alloc_gpu_mem,
	amdgpu_cgs_free_gpu_mem,
	amdgpu_cgs_gmap_gpu_mem,
	amdgpu_cgs_gunmap_gpu_mem,
	amdgpu_cgs_kmap_gpu_mem,
	amdgpu_cgs_kunmap_gpu_mem,
	amdgpu_cgs_read_register,
	amdgpu_cgs_write_register,
	amdgpu_cgs_read_ind_register,
	amdgpu_cgs_write_ind_register,
	amdgpu_cgs_read_pci_config_byte,
	amdgpu_cgs_read_pci_config_word,
	amdgpu_cgs_read_pci_config_dword,
	amdgpu_cgs_write_pci_config_byte,
	amdgpu_cgs_write_pci_config_word,
	amdgpu_cgs_write_pci_config_dword,
	amdgpu_cgs_get_pci_resource,
	amdgpu_cgs_atom_get_data_table,
	amdgpu_cgs_atom_get_cmd_table_revs,
	amdgpu_cgs_atom_exec_cmd_table,
	amdgpu_cgs_create_pm_request,
	amdgpu_cgs_destroy_pm_request,
	amdgpu_cgs_set_pm_request,
	amdgpu_cgs_pm_request_clock,
	amdgpu_cgs_pm_request_engine,
	amdgpu_cgs_pm_query_clock_limits,
	amdgpu_cgs_set_camera_voltages,
	amdgpu_cgs_get_firmware_info,
	amdgpu_cgs_rel_firmware,
	amdgpu_cgs_set_powergating_state,
	amdgpu_cgs_set_clockgating_state,
	amdgpu_cgs_get_active_displays_info,
	amdgpu_cgs_notify_dpm_enabled,
	amdgpu_cgs_call_acpi_method,
	amdgpu_cgs_query_system_info,
};
static const struct cgs_os_ops amdgpu_cgs_os_ops = {
	amdgpu_cgs_add_irq_source,
	amdgpu_cgs_irq_get,
	amdgpu_cgs_irq_put
};
struct cgs_device *amdgpu_cgs_create_device(struct amdgpu_device *adev)
{
	struct amdgpu_cgs_device *cgs_device =
		kmalloc(sizeof(*cgs_device), GFP_KERNEL);

	if (!cgs_device) {
		DRM_ERROR("Couldn't allocate CGS device structure\n");
		return NULL;
	}

	cgs_device->base.ops = &amdgpu_cgs_ops;
	cgs_device->base.os_ops = &amdgpu_cgs_os_ops;
	cgs_device->adev = adev;

	return (struct cgs_device *)cgs_device;
}
void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device)
{
	kfree(cgs_device);
}