/*
 * Copyright 2007-8 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"

#include "atom.h"
#include "atom-bits.h"
#include "atombios_encoders.h"
#include "bif/bif_4_1_d.h"

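/* Hook for applying board-specific i2c GPIO quirks before a bus record
 * is decoded; amdgpu currently carries no quirks, so this is a no-op.
 */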
static void amdgpu_atombios_lookup_i2c_gpio_quirks(struct amdgpu_device *adev,
						   ATOM_GPIO_I2C_ASSIGMENT *gpio,
						   u8 index)
{

}

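/* Decode one GPIO_I2C_Info assignment into the register/bitmask form
 * consumed by the amdgpu i2c layer.
 */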
static struct amdgpu_i2c_bus_rec amdgpu_atombios_get_bus_rec_for_i2c_gpio(ATOM_GPIO_I2C_ASSIGMENT *gpio)
{
	struct amdgpu_i2c_bus_rec i2c;

	memset(&i2c, 0, sizeof(struct amdgpu_i2c_bus_rec));

	i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex);
	i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex);
	i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex);
	i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex);
	i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex);
	i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex);
	i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex);
	i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex);
	i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
	i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
	i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
	i2c.en_data_mask = (1 << gpio->ucDataEnShift);
	i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
	i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
	i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
	i2c.a_data_mask = (1 << gpio->ucDataA_Shift);

	if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
		i2c.hw_capable = true;
	else
		i2c.hw_capable = false;

	if (gpio->sucI2cId.ucAccess == 0xa0)
		i2c.mm_i2c = true;
	else
		i2c.mm_i2c = false;

	i2c.i2c_id = gpio->sucI2cId.ucAccess;

	if (i2c.mask_clk_reg)
		i2c.valid = true;
	else
		i2c.valid = false;

	return i2c;
}

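/* Walk the GPIO_I2C_Info table and return the bus record whose i2c id
 * matches @id; an invalid (zeroed) record is returned on no match.
 */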
struct amdgpu_i2c_bus_rec amdgpu_atombios_lookup_i2c_gpio(struct amdgpu_device *adev,
							  uint8_t id)
{
	struct atom_context *ctx = adev->mode_info.atom_context;
	ATOM_GPIO_I2C_ASSIGMENT *gpio;
	struct amdgpu_i2c_bus_rec i2c;
	int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info);
	struct _ATOM_GPIO_I2C_INFO *i2c_info;
	uint16_t data_offset, size;
	int i, num_indices;

	memset(&i2c, 0, sizeof(struct amdgpu_i2c_bus_rec));
	i2c.valid = false;

	if (amdgpu_atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
		i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);

		num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
			sizeof(ATOM_GPIO_I2C_ASSIGMENT);

		gpio = &i2c_info->asGPIO_Info[0];
		for (i = 0; i < num_indices; i++) {
			amdgpu_atombios_lookup_i2c_gpio_quirks(adev, gpio, i);

			if (gpio->sucI2cId.ucAccess == id) {
				i2c = amdgpu_atombios_get_bus_rec_for_i2c_gpio(gpio);
				break;
			}
			gpio = (ATOM_GPIO_I2C_ASSIGMENT *)
				((u8 *)gpio + sizeof(ATOM_GPIO_I2C_ASSIGMENT));
		}
	}

	return i2c;
}

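/* Instantiate a drm i2c adapter for every valid bus described in the
 * GPIO_I2C_Info table, named after its i2c id.
 */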
void amdgpu_atombios_i2c_init(struct amdgpu_device *adev)
{
	struct atom_context *ctx = adev->mode_info.atom_context;
	ATOM_GPIO_I2C_ASSIGMENT *gpio;
	struct amdgpu_i2c_bus_rec i2c;
	int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info);
	struct _ATOM_GPIO_I2C_INFO *i2c_info;
	uint16_t data_offset, size;
	int i, num_indices;
	char stmp[32];

	if (amdgpu_atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
		i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);

		num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
			sizeof(ATOM_GPIO_I2C_ASSIGMENT);

		gpio = &i2c_info->asGPIO_Info[0];
		for (i = 0; i < num_indices; i++) {
			amdgpu_atombios_lookup_i2c_gpio_quirks(adev, gpio, i);

			i2c = amdgpu_atombios_get_bus_rec_for_i2c_gpio(gpio);

			if (i2c.valid) {
				sprintf(stmp, "0x%x", i2c.i2c_id);
				adev->i2c_bus[i] = amdgpu_i2c_create(adev->ddev, &i2c, stmp);
			}
			gpio = (ATOM_GPIO_I2C_ASSIGMENT *)
				((u8 *)gpio + sizeof(ATOM_GPIO_I2C_ASSIGMENT));
		}
	}
}

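/* Resolve a GPIO id to its register/shift/mask triple via the
 * GPIO_Pin_LUT table; used below for HPD pin lookup.
 */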
struct amdgpu_gpio_rec
amdgpu_atombios_lookup_gpio(struct amdgpu_device *adev,
			    u8 id)
{
	struct atom_context *ctx = adev->mode_info.atom_context;
	struct amdgpu_gpio_rec gpio;
	int index = GetIndexIntoMasterTable(DATA, GPIO_Pin_LUT);
	struct _ATOM_GPIO_PIN_LUT *gpio_info;
	ATOM_GPIO_PIN_ASSIGNMENT *pin;
	u16 data_offset, size;
	int i, num_indices;

	memset(&gpio, 0, sizeof(struct amdgpu_gpio_rec));
	gpio.valid = false;

	if (amdgpu_atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
		gpio_info = (struct _ATOM_GPIO_PIN_LUT *)(ctx->bios + data_offset);

		num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
			sizeof(ATOM_GPIO_PIN_ASSIGNMENT);

		pin = gpio_info->asGPIO_Pin;
		for (i = 0; i < num_indices; i++) {
			if (id == pin->ucGPIO_ID) {
				gpio.id = pin->ucGPIO_ID;
				gpio.reg = le16_to_cpu(pin->usGpioPin_AIndex);
				gpio.shift = pin->ucGpioPinBitShift;
				gpio.mask = (1 << pin->ucGpioPinBitShift);
				gpio.valid = true;
				break;
			}
			pin = (ATOM_GPIO_PIN_ASSIGNMENT *)
				((u8 *)pin + sizeof(ATOM_GPIO_PIN_ASSIGNMENT));
		}
	}

	return gpio;
}

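/* Translate an HPD GPIO pin into an AMDGPU_HPD_n id by matching the pin
 * register against the asic's HPD GPIO register and decoding the bit shift.
 */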
static struct amdgpu_hpd
amdgpu_atombios_get_hpd_info_from_gpio(struct amdgpu_device *adev,
				       struct amdgpu_gpio_rec *gpio)
{
	struct amdgpu_hpd hpd;
	u32 reg;

	memset(&hpd, 0, sizeof(struct amdgpu_hpd));

	reg = amdgpu_display_hpd_get_gpio_reg(adev);

	hpd.gpio = *gpio;
	if (gpio->reg == reg) {
		switch(gpio->shift) {
		case 0:
			hpd.hpd = AMDGPU_HPD_1;
			break;
		case 8:
			hpd.hpd = AMDGPU_HPD_2;
			break;
		case 16:
			hpd.hpd = AMDGPU_HPD_3;
			break;
		case 24:
			hpd.hpd = AMDGPU_HPD_4;
			break;
		case 26:
			hpd.hpd = AMDGPU_HPD_5;
			break;
		case 28:
			hpd.hpd = AMDGPU_HPD_6;
			break;
		default:
			hpd.hpd = AMDGPU_HPD_NONE;
			break;
		}
	} else
		hpd.hpd = AMDGPU_HPD_NONE;
	return hpd;
}

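/* Map ATOM connector object ids to drm connector types; the table is
 * indexed by the connector object id decoded from a display path.
 */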
static const int object_connector_convert[] = {
	DRM_MODE_CONNECTOR_Unknown,
	DRM_MODE_CONNECTOR_DVII,
	DRM_MODE_CONNECTOR_DVII,
	DRM_MODE_CONNECTOR_DVID,
	DRM_MODE_CONNECTOR_DVID,
	DRM_MODE_CONNECTOR_VGA,
	DRM_MODE_CONNECTOR_Composite,
	DRM_MODE_CONNECTOR_SVIDEO,
	DRM_MODE_CONNECTOR_Unknown,
	DRM_MODE_CONNECTOR_Unknown,
	DRM_MODE_CONNECTOR_9PinDIN,
	DRM_MODE_CONNECTOR_Unknown,
	DRM_MODE_CONNECTOR_HDMIA,
	DRM_MODE_CONNECTOR_HDMIB,
	DRM_MODE_CONNECTOR_LVDS,
	DRM_MODE_CONNECTOR_9PinDIN,
	DRM_MODE_CONNECTOR_Unknown,
	DRM_MODE_CONNECTOR_Unknown,
	DRM_MODE_CONNECTOR_Unknown,
	DRM_MODE_CONNECTOR_DisplayPort,
	DRM_MODE_CONNECTOR_eDP,
	DRM_MODE_CONNECTOR_Unknown
};

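/* Build drm encoders and connectors from the Object_Header display paths:
 * for each supported device path, decode its connector type, register the
 * encoders on the path (with their caps), pick up any DDC/clock-data
 * routers, and look up the ddc bus and hpd pin belonging to the connector.
 */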
bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	struct atom_context *ctx = mode_info->atom_context;
	int index = GetIndexIntoMasterTable(DATA, Object_Header);
	u16 size, data_offset;
	u8 frev, crev;
	ATOM_CONNECTOR_OBJECT_TABLE *con_obj;
	ATOM_ENCODER_OBJECT_TABLE *enc_obj;
	ATOM_OBJECT_TABLE *router_obj;
	ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj;
	ATOM_OBJECT_HEADER *obj_header;
	int i, j, k, path_size, device_support;
	int connector_type;
	u16 conn_id, connector_object_id;
	struct amdgpu_i2c_bus_rec ddc_bus;
	struct amdgpu_router router;
	struct amdgpu_gpio_rec gpio;
	struct amdgpu_hpd hpd;

	if (!amdgpu_atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
		return false;

	if (crev < 2)
		return false;

	obj_header = (ATOM_OBJECT_HEADER *) (ctx->bios + data_offset);
	path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *)
		(ctx->bios + data_offset +
		 le16_to_cpu(obj_header->usDisplayPathTableOffset));
	con_obj = (ATOM_CONNECTOR_OBJECT_TABLE *)
		(ctx->bios + data_offset +
		 le16_to_cpu(obj_header->usConnectorObjectTableOffset));
	enc_obj = (ATOM_ENCODER_OBJECT_TABLE *)
		(ctx->bios + data_offset +
		 le16_to_cpu(obj_header->usEncoderObjectTableOffset));
	router_obj = (ATOM_OBJECT_TABLE *)
		(ctx->bios + data_offset +
		 le16_to_cpu(obj_header->usRouterObjectTableOffset));
	device_support = le16_to_cpu(obj_header->usDeviceSupport);

	path_size = 0;
	for (i = 0; i < path_obj->ucNumOfDispPath; i++) {
		uint8_t *addr = (uint8_t *) path_obj->asDispPath;
		ATOM_DISPLAY_OBJECT_PATH *path;
		addr += path_size;
		path = (ATOM_DISPLAY_OBJECT_PATH *) addr;
		path_size += le16_to_cpu(path->usSize);

		if (device_support & le16_to_cpu(path->usDeviceTag)) {
			uint8_t con_obj_id, con_obj_num, con_obj_type;

			con_obj_id =
				(le16_to_cpu(path->usConnObjectId) & OBJECT_ID_MASK)
				>> OBJECT_ID_SHIFT;
			con_obj_num =
				(le16_to_cpu(path->usConnObjectId) & ENUM_ID_MASK)
				>> ENUM_ID_SHIFT;
			con_obj_type =
				(le16_to_cpu(path->usConnObjectId) &
				 OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;

			connector_type =
				object_connector_convert[con_obj_id];
			connector_object_id = con_obj_id;

			if (connector_type == DRM_MODE_CONNECTOR_Unknown)
				continue;

			router.ddc_valid = false;
			router.cd_valid = false;
			for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) {
				uint8_t grph_obj_id, grph_obj_num, grph_obj_type;

				grph_obj_id =
					(le16_to_cpu(path->usGraphicObjIds[j]) &
					 OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
				grph_obj_num =
					(le16_to_cpu(path->usGraphicObjIds[j]) &
					 ENUM_ID_MASK) >> ENUM_ID_SHIFT;
				grph_obj_type =
					(le16_to_cpu(path->usGraphicObjIds[j]) &
					 OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;

				if (grph_obj_type == GRAPH_OBJECT_TYPE_ENCODER) {
					for (k = 0; k < enc_obj->ucNumberOfObjects; k++) {
						u16 encoder_obj = le16_to_cpu(enc_obj->asObjects[k].usObjectID);
						if (le16_to_cpu(path->usGraphicObjIds[j]) == encoder_obj) {
							ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *)
								(ctx->bios + data_offset +
								 le16_to_cpu(enc_obj->asObjects[k].usRecordOffset));
							ATOM_ENCODER_CAP_RECORD *cap_record;
							u16 caps = 0;

							while (record->ucRecordSize > 0 &&
							       record->ucRecordType > 0 &&
							       record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
								switch (record->ucRecordType) {
								case ATOM_ENCODER_CAP_RECORD_TYPE:
									cap_record = (ATOM_ENCODER_CAP_RECORD *)
										record;
									caps = le16_to_cpu(cap_record->usEncoderCap);
									break;
								}
								record = (ATOM_COMMON_RECORD_HEADER *)
									((char *)record + record->ucRecordSize);
							}
							amdgpu_display_add_encoder(adev, encoder_obj,
										   le16_to_cpu(path->usDeviceTag),
										   caps);
						}
					}
				} else if (grph_obj_type == GRAPH_OBJECT_TYPE_ROUTER) {
					for (k = 0; k < router_obj->ucNumberOfObjects; k++) {
						u16 router_obj_id = le16_to_cpu(router_obj->asObjects[k].usObjectID);
						if (le16_to_cpu(path->usGraphicObjIds[j]) == router_obj_id) {
							ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *)
								(ctx->bios + data_offset +
								 le16_to_cpu(router_obj->asObjects[k].usRecordOffset));
							ATOM_I2C_RECORD *i2c_record;
							ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
							ATOM_ROUTER_DDC_PATH_SELECT_RECORD *ddc_path;
							ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *cd_path;
							ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *router_src_dst_table =
								(ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *)
								(ctx->bios + data_offset +
								 le16_to_cpu(router_obj->asObjects[k].usSrcDstTableOffset));
							u8 *num_dst_objs = (u8 *)
								((u8 *)router_src_dst_table + 1 +
								 (router_src_dst_table->ucNumberOfSrc * 2));
							u16 *dst_objs = (u16 *)(num_dst_objs + 1);
							int enum_id;

							router.router_id = router_obj_id;
							for (enum_id = 0; enum_id < (*num_dst_objs); enum_id++) {
								if (le16_to_cpu(path->usConnObjectId) ==
								    le16_to_cpu(dst_objs[enum_id]))
									break;
							}

							while (record->ucRecordSize > 0 &&
							       record->ucRecordType > 0 &&
							       record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
								switch (record->ucRecordType) {
								case ATOM_I2C_RECORD_TYPE:
									i2c_record =
										(ATOM_I2C_RECORD *)
										record;
									i2c_config =
										(ATOM_I2C_ID_CONFIG_ACCESS *)
										&i2c_record->sucI2cId;
									router.i2c_info =
										amdgpu_atombios_lookup_i2c_gpio(adev,
														i2c_config->ucAccess);
									router.i2c_addr = i2c_record->ucI2CAddr >> 1;
									break;
								case ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE:
									ddc_path = (ATOM_ROUTER_DDC_PATH_SELECT_RECORD *)
										record;
									router.ddc_valid = true;
									router.ddc_mux_type = ddc_path->ucMuxType;
									router.ddc_mux_control_pin = ddc_path->ucMuxControlPin;
									router.ddc_mux_state = ddc_path->ucMuxState[enum_id];
									break;
								case ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE:
									cd_path = (ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *)
										record;
									router.cd_valid = true;
									router.cd_mux_type = cd_path->ucMuxType;
									router.cd_mux_control_pin = cd_path->ucMuxControlPin;
									router.cd_mux_state = cd_path->ucMuxState[enum_id];
									break;
								}
								record = (ATOM_COMMON_RECORD_HEADER *)
									((char *)record + record->ucRecordSize);
							}
						}
					}
				}
			}

			/* look up gpio for ddc, hpd */
			ddc_bus.valid = false;
			hpd.hpd = AMDGPU_HPD_NONE;
			if ((le16_to_cpu(path->usDeviceTag) &
			     (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) == 0) {
				for (j = 0; j < con_obj->ucNumberOfObjects; j++) {
					if (le16_to_cpu(path->usConnObjectId) ==
					    le16_to_cpu(con_obj->asObjects[j].usObjectID)) {
						ATOM_COMMON_RECORD_HEADER *record =
							(ATOM_COMMON_RECORD_HEADER *)
							(ctx->bios + data_offset +
							 le16_to_cpu(con_obj->asObjects[j].usRecordOffset));
						ATOM_I2C_RECORD *i2c_record;
						ATOM_HPD_INT_RECORD *hpd_record;
						ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;

						while (record->ucRecordSize > 0 &&
						       record->ucRecordType > 0 &&
						       record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
							switch (record->ucRecordType) {
							case ATOM_I2C_RECORD_TYPE:
								i2c_record =
									(ATOM_I2C_RECORD *)
									record;
								i2c_config =
									(ATOM_I2C_ID_CONFIG_ACCESS *)
									&i2c_record->sucI2cId;
								ddc_bus = amdgpu_atombios_lookup_i2c_gpio(adev,
													  i2c_config->ucAccess);
								break;
							case ATOM_HPD_INT_RECORD_TYPE:
								hpd_record =
									(ATOM_HPD_INT_RECORD *)
									record;
								gpio = amdgpu_atombios_lookup_gpio(adev,
												   hpd_record->ucHPDIntGPIOID);
								hpd = amdgpu_atombios_get_hpd_info_from_gpio(adev, &gpio);
								hpd.plugged_state = hpd_record->ucPlugged_PinState;
								break;
							}
							record = (ATOM_COMMON_RECORD_HEADER *)
								((char *)record + record->ucRecordSize);
						}
						break;
					}
				}
			}

			/* needed for aux chan transactions */
			ddc_bus.hpd = hpd.hpd;

			conn_id = le16_to_cpu(path->usConnObjectId);

			amdgpu_display_add_connector(adev,
						     conn_id,
						     le16_to_cpu(path->usDeviceTag),
						     connector_type, &ddc_bus,
						     connector_object_id,
						     &hpd,
						     &router);
		}
	}

	amdgpu_link_encoder_connector(adev->ddev);

	return true;
}

union firmware_info {
	ATOM_FIRMWARE_INFO info;
	ATOM_FIRMWARE_INFO_V1_2 info_12;
	ATOM_FIRMWARE_INFO_V1_3 info_13;
	ATOM_FIRMWARE_INFO_V1_4 info_14;
	ATOM_FIRMWARE_INFO_V2_1 info_21;
	ATOM_FIRMWARE_INFO_V2_2 info_22;
};

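/* Pull PLL limits and default clocks out of the FirmwareInfo table,
 * replicating the pixel PLL settings across all PPLLs and applying
 * sane fallbacks where the vbios leaves fields zero.
 */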
int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
	uint8_t frev, crev;
	uint16_t data_offset;
	int ret = -EINVAL;

	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		int i;
		struct amdgpu_pll *ppll = &adev->clock.ppll[0];
		struct amdgpu_pll *spll = &adev->clock.spll;
		struct amdgpu_pll *mpll = &adev->clock.mpll;
		union firmware_info *firmware_info =
			(union firmware_info *)(mode_info->atom_context->bios +
						data_offset);
		/* pixel clocks */
		ppll->reference_freq =
			le16_to_cpu(firmware_info->info.usReferenceClock);
		ppll->reference_div = 0;

		if (crev < 2)
			ppll->pll_out_min =
				le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output);
		else
			ppll->pll_out_min =
				le32_to_cpu(firmware_info->info_12.ulMinPixelClockPLL_Output);
		ppll->pll_out_max =
			le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output);

		if (crev >= 4) {
			ppll->lcd_pll_out_min =
				le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100;
			if (ppll->lcd_pll_out_min == 0)
				ppll->lcd_pll_out_min = ppll->pll_out_min;
			ppll->lcd_pll_out_max =
				le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100;
			if (ppll->lcd_pll_out_max == 0)
				ppll->lcd_pll_out_max = ppll->pll_out_max;
		} else {
			ppll->lcd_pll_out_min = ppll->pll_out_min;
			ppll->lcd_pll_out_max = ppll->pll_out_max;
		}

		if (ppll->pll_out_min == 0)
			ppll->pll_out_min = 64800;

		ppll->pll_in_min =
			le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Input);
		ppll->pll_in_max =
			le16_to_cpu(firmware_info->info.usMaxPixelClockPLL_Input);

		ppll->min_post_div = 2;
		ppll->max_post_div = 0x7f;
		ppll->min_frac_feedback_div = 0;
		ppll->max_frac_feedback_div = 9;
		ppll->min_ref_div = 2;
		ppll->max_ref_div = 0x3ff;
		ppll->min_feedback_div = 4;
		ppll->max_feedback_div = 0xfff;
		ppll->best_vco = 0;

		for (i = 1; i < AMDGPU_MAX_PPLL; i++)
			adev->clock.ppll[i] = *ppll;

		/* system clock */
		spll->reference_freq =
			le16_to_cpu(firmware_info->info_21.usCoreReferenceClock);
		spll->reference_div = 0;

		spll->pll_out_min =
			le16_to_cpu(firmware_info->info.usMinEngineClockPLL_Output);
		spll->pll_out_max =
			le32_to_cpu(firmware_info->info.ulMaxEngineClockPLL_Output);

		if (spll->pll_out_min == 0)
			spll->pll_out_min = 64800;

		spll->pll_in_min =
			le16_to_cpu(firmware_info->info.usMinEngineClockPLL_Input);
		spll->pll_in_max =
			le16_to_cpu(firmware_info->info.usMaxEngineClockPLL_Input);

		spll->min_post_div = 1;
		spll->max_post_div = 1;
		spll->min_ref_div = 2;
		spll->max_ref_div = 0xff;
		spll->min_feedback_div = 4;
		spll->max_feedback_div = 0xff;
		spll->best_vco = 0;

		/* memory clock */
		mpll->reference_freq =
			le16_to_cpu(firmware_info->info_21.usMemoryReferenceClock);
		mpll->reference_div = 0;

		mpll->pll_out_min =
			le16_to_cpu(firmware_info->info.usMinMemoryClockPLL_Output);
		mpll->pll_out_max =
			le32_to_cpu(firmware_info->info.ulMaxMemoryClockPLL_Output);

		if (mpll->pll_out_min == 0)
			mpll->pll_out_min = 64800;

		mpll->pll_in_min =
			le16_to_cpu(firmware_info->info.usMinMemoryClockPLL_Input);
		mpll->pll_in_max =
			le16_to_cpu(firmware_info->info.usMaxMemoryClockPLL_Input);

		adev->clock.default_sclk =
			le32_to_cpu(firmware_info->info.ulDefaultEngineClock);
		adev->clock.default_mclk =
			le32_to_cpu(firmware_info->info.ulDefaultMemoryClock);

		mpll->min_post_div = 1;
		mpll->max_post_div = 1;
		mpll->min_ref_div = 2;
		mpll->max_ref_div = 0xff;
		mpll->min_feedback_div = 4;
		mpll->max_feedback_div = 0xff;
		mpll->best_vco = 0;

		/* disp clock */
		adev->clock.default_dispclk =
			le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq);
		/* set a reasonable default for DP */
		if (adev->clock.default_dispclk < 53900) {
			DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n",
				 adev->clock.default_dispclk / 100);
			adev->clock.default_dispclk = 60000;
		}
		adev->clock.dp_extclk =
			le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
		adev->clock.current_dispclk = adev->clock.default_dispclk;

		adev->clock.max_pixel_clock = le16_to_cpu(firmware_info->info.usMaxPixelClock);
		if (adev->clock.max_pixel_clock == 0)
			adev->clock.max_pixel_clock = 40000;

		/* not technically a clock, but... */
		adev->mode_info.firmware_flags =
			le16_to_cpu(firmware_info->info.usFirmwareCapability.susAccess);

		ret = 0;
	}

	adev->pm.current_sclk = adev->clock.default_sclk;
	adev->pm.current_mclk = adev->clock.default_mclk;

	return ret;
}

union gfx_info {
	ATOM_GFX_INFO_V2_1 info;
};

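/* Copy the shader-engine/pipe topology limits from the GFX_Info table
 * into adev->gfx.config.
 */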
int amdgpu_atombios_get_gfx_info(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, GFX_Info);
	uint8_t frev, crev;
	uint16_t data_offset;
	int ret = -EINVAL;

	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		union gfx_info *gfx_info = (union gfx_info *)
			(mode_info->atom_context->bios + data_offset);

		adev->gfx.config.max_shader_engines = gfx_info->info.max_shader_engines;
		adev->gfx.config.max_tile_pipes = gfx_info->info.max_tile_pipes;
		adev->gfx.config.max_cu_per_sh = gfx_info->info.max_cu_per_sh;
		adev->gfx.config.max_sh_per_se = gfx_info->info.max_sh_per_se;
		adev->gfx.config.max_backends_per_se = gfx_info->info.max_backends_per_se;
		adev->gfx.config.max_texture_channel_caches =
			gfx_info->info.max_texture_channel_caches;

		ret = 0;
	}
	return ret;
}

union igp_info {
	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_9 info_9;
};

static void amdgpu_atombios_get_igp_ss_overrides(struct amdgpu_device *adev,
						 struct amdgpu_atom_ss *ss,
						 int id)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
	u16 data_offset, size;
	union igp_info *igp_info;
	u8 frev, crev;
	u16 percentage = 0, rate = 0;

	/* get any igp specific overrides */
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, &size,
					  &frev, &crev, &data_offset)) {
		igp_info = (union igp_info *)
			(mode_info->atom_context->bios + data_offset);
		switch (crev) {
		case 6:
			switch (id) {
			case ASIC_INTERNAL_SS_ON_TMDS:
				percentage = le16_to_cpu(igp_info->info_6.usDVISSPercentage);
				rate = le16_to_cpu(igp_info->info_6.usDVISSpreadRateIn10Hz);
				break;
			case ASIC_INTERNAL_SS_ON_HDMI:
				percentage = le16_to_cpu(igp_info->info_6.usHDMISSPercentage);
				rate = le16_to_cpu(igp_info->info_6.usHDMISSpreadRateIn10Hz);
				break;
			case ASIC_INTERNAL_SS_ON_LVDS:
				percentage = le16_to_cpu(igp_info->info_6.usLvdsSSPercentage);
				rate = le16_to_cpu(igp_info->info_6.usLvdsSSpreadRateIn10Hz);
				break;
			}
			break;
		case 7:
			switch (id) {
			case ASIC_INTERNAL_SS_ON_TMDS:
				percentage = le16_to_cpu(igp_info->info_7.usDVISSPercentage);
				rate = le16_to_cpu(igp_info->info_7.usDVISSpreadRateIn10Hz);
				break;
			case ASIC_INTERNAL_SS_ON_HDMI:
				percentage = le16_to_cpu(igp_info->info_7.usHDMISSPercentage);
				rate = le16_to_cpu(igp_info->info_7.usHDMISSpreadRateIn10Hz);
				break;
			case ASIC_INTERNAL_SS_ON_LVDS:
				percentage = le16_to_cpu(igp_info->info_7.usLvdsSSPercentage);
				rate = le16_to_cpu(igp_info->info_7.usLvdsSSpreadRateIn10Hz);
				break;
			}
			break;
		case 8:
			switch (id) {
			case ASIC_INTERNAL_SS_ON_TMDS:
				percentage = le16_to_cpu(igp_info->info_8.usDVISSPercentage);
				rate = le16_to_cpu(igp_info->info_8.usDVISSpreadRateIn10Hz);
				break;
			case ASIC_INTERNAL_SS_ON_HDMI:
				percentage = le16_to_cpu(igp_info->info_8.usHDMISSPercentage);
				rate = le16_to_cpu(igp_info->info_8.usHDMISSpreadRateIn10Hz);
				break;
			case ASIC_INTERNAL_SS_ON_LVDS:
				percentage = le16_to_cpu(igp_info->info_8.usLvdsSSPercentage);
				rate = le16_to_cpu(igp_info->info_8.usLvdsSSpreadRateIn10Hz);
				break;
			}
			break;
		case 9:
			switch (id) {
			case ASIC_INTERNAL_SS_ON_TMDS:
				percentage = le16_to_cpu(igp_info->info_9.usDVISSPercentage);
				rate = le16_to_cpu(igp_info->info_9.usDVISSpreadRateIn10Hz);
				break;
			case ASIC_INTERNAL_SS_ON_HDMI:
				percentage = le16_to_cpu(igp_info->info_9.usHDMISSPercentage);
				rate = le16_to_cpu(igp_info->info_9.usHDMISSpreadRateIn10Hz);
				break;
			case ASIC_INTERNAL_SS_ON_LVDS:
				percentage = le16_to_cpu(igp_info->info_9.usLvdsSSPercentage);
				rate = le16_to_cpu(igp_info->info_9.usLvdsSSpreadRateIn10Hz);
				break;
			}
			break;
		default:
			DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
			break;
		}
		if (percentage)
			ss->percentage = percentage;
		if (rate)
			ss->rate = rate;
	}
}

union asic_ss_info {
	struct _ATOM_ASIC_INTERNAL_SS_INFO info;
	struct _ATOM_ASIC_INTERNAL_SS_INFO_V2 info_2;
	struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 info_3;
};

union asic_ss_assignment {
	struct _ATOM_ASIC_SS_ASSIGNMENT v1;
	struct _ATOM_ASIC_SS_ASSIGNMENT_V2 v2;
	struct _ATOM_ASIC_SS_ASSIGNMENT_V3 v3;
};

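/* Look up spread spectrum parameters for a given clock id and target
 * clock in the ASIC_InternalSS_Info table; v3 entries may be further
 * overridden by IGP system info on APUs.
 */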
bool amdgpu_atombios_get_asic_ss_info(struct amdgpu_device *adev,
				      struct amdgpu_atom_ss *ss,
				      int id, u32 clock)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
	uint16_t data_offset, size;
	union asic_ss_info *ss_info;
	union asic_ss_assignment *ss_assign;
	uint8_t frev, crev;
	int i, num_indices;

	if (id == ASIC_INTERNAL_MEMORY_SS) {
		if (!(adev->mode_info.firmware_flags & ATOM_BIOS_INFO_MEMORY_CLOCK_SS_SUPPORT))
			return false;
	}
	if (id == ASIC_INTERNAL_ENGINE_SS) {
		if (!(adev->mode_info.firmware_flags & ATOM_BIOS_INFO_ENGINE_CLOCK_SS_SUPPORT))
			return false;
	}

	memset(ss, 0, sizeof(struct amdgpu_atom_ss));
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, &size,
					  &frev, &crev, &data_offset)) {

		ss_info =
			(union asic_ss_info *)(mode_info->atom_context->bios + data_offset);

		switch (frev) {
		case 1:
			num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
				sizeof(ATOM_ASIC_SS_ASSIGNMENT);

			ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info.asSpreadSpectrum[0]);
			for (i = 0; i < num_indices; i++) {
				if ((ss_assign->v1.ucClockIndication == id) &&
				    (clock <= le32_to_cpu(ss_assign->v1.ulTargetClockRange))) {
					ss->percentage =
						le16_to_cpu(ss_assign->v1.usSpreadSpectrumPercentage);
					ss->type = ss_assign->v1.ucSpreadSpectrumMode;
					ss->rate = le16_to_cpu(ss_assign->v1.usSpreadRateInKhz);
					ss->percentage_divider = 100;
					return true;
				}
				ss_assign = (union asic_ss_assignment *)
					((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT));
			}
			break;
		case 2:
			num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
				sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);
			ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info_2.asSpreadSpectrum[0]);
			for (i = 0; i < num_indices; i++) {
				if ((ss_assign->v2.ucClockIndication == id) &&
				    (clock <= le32_to_cpu(ss_assign->v2.ulTargetClockRange))) {
					ss->percentage =
						le16_to_cpu(ss_assign->v2.usSpreadSpectrumPercentage);
					ss->type = ss_assign->v2.ucSpreadSpectrumMode;
					ss->rate = le16_to_cpu(ss_assign->v2.usSpreadRateIn10Hz);
					ss->percentage_divider = 100;
					if ((crev == 2) &&
					    ((id == ASIC_INTERNAL_ENGINE_SS) ||
					     (id == ASIC_INTERNAL_MEMORY_SS)))
						ss->rate /= 100;
					return true;
				}
				ss_assign = (union asic_ss_assignment *)
					((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2));
			}
			break;
		case 3:
			num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
				sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
			ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info_3.asSpreadSpectrum[0]);
			for (i = 0; i < num_indices; i++) {
				if ((ss_assign->v3.ucClockIndication == id) &&
				    (clock <= le32_to_cpu(ss_assign->v3.ulTargetClockRange))) {
					ss->percentage =
						le16_to_cpu(ss_assign->v3.usSpreadSpectrumPercentage);
					ss->type = ss_assign->v3.ucSpreadSpectrumMode;
					ss->rate = le16_to_cpu(ss_assign->v3.usSpreadRateIn10Hz);
					if (ss_assign->v3.ucSpreadSpectrumMode &
					    SS_MODE_V3_PERCENTAGE_DIV_BY_1000_MASK)
						ss->percentage_divider = 1000;
					else
						ss->percentage_divider = 100;
					if ((id == ASIC_INTERNAL_ENGINE_SS) ||
					    (id == ASIC_INTERNAL_MEMORY_SS))
						ss->rate /= 100;
					if (adev->flags & AMD_IS_APU)
						amdgpu_atombios_get_igp_ss_overrides(adev, ss, id);
					return true;
				}
				ss_assign = (union asic_ss_assignment *)
					((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3));
			}
			break;
		default:
			DRM_ERROR("Unsupported ASIC_InternalSS_Info table: %d %d\n", frev, crev);
			break;
		}

	}
	return false;
}

union get_clock_dividers {
	struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS v1;
	struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2 v2;
	struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3 v3;
	struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 v4;
	struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5 v5;
	struct _COMPUTE_GPU_CLOCK_INPUT_PARAMETERS_V1_6 v6_in;
	struct _COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 v6_out;
};

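/* The divider helpers below marshal their arguments into the little-endian
 * parameter block expected by an ATOM command table, execute the table,
 * and decode the results it writes back in place.
 */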
int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
				       u8 clock_type,
				       u32 clock,
				       bool strobe_mode,
				       struct atom_clock_dividers *dividers)
{
	union get_clock_dividers args;
	int index = GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL);
	u8 frev, crev;

	memset(&args, 0, sizeof(args));
	memset(dividers, 0, sizeof(struct atom_clock_dividers));

	if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
		return -EINVAL;

	switch (crev) {
	case 4:
		/* fusion */
		args.v4.ulClock = cpu_to_le32(clock);	/* 10 khz */

		amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);

		dividers->post_divider = dividers->post_div = args.v4.ucPostDiv;
		dividers->real_clock = le32_to_cpu(args.v4.ulClock);
		break;
	case 6:
		/* COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, COMPUTE_GPUCLK_INPUT_FLAG_SCLK */
		args.v6_in.ulClock.ulComputeClockFlag = clock_type;
		args.v6_in.ulClock.ulClockFreq = cpu_to_le32(clock);	/* 10 khz */

		amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);

		dividers->whole_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDiv);
		dividers->frac_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDivFrac);
		dividers->ref_div = args.v6_out.ucPllRefDiv;
		dividers->post_div = args.v6_out.ucPllPostDiv;
		dividers->flags = args.v6_out.ucPllCntlFlag;
		dividers->real_clock = le32_to_cpu(args.v6_out.ulClock.ulClock);
		dividers->post_divider = args.v6_out.ulClock.ucPostDiv;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
					    u32 clock,
					    bool strobe_mode,
					    struct atom_mpll_param *mpll_param)
{
	COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1 args;
	int index = GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam);
	u8 frev, crev;

	memset(&args, 0, sizeof(args));
	memset(mpll_param, 0, sizeof(struct atom_mpll_param));

	if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
		return -EINVAL;

	switch (frev) {
	case 2:
		switch (crev) {
		case 1:
			/* SI */
			args.ulClock = cpu_to_le32(clock);	/* 10 khz */
			args.ucInputFlag = 0;
			if (strobe_mode)
				args.ucInputFlag |= MPLL_INPUT_FLAG_STROBE_MODE_EN;

			amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);

			mpll_param->clkfrac = le16_to_cpu(args.ulFbDiv.usFbDivFrac);
			mpll_param->clkf = le16_to_cpu(args.ulFbDiv.usFbDiv);
			mpll_param->post_div = args.ucPostDiv;
			mpll_param->dll_speed = args.ucDllSpeed;
			mpll_param->bwcntl = args.ucBWCntl;
			mpll_param->vco_mode =
				(args.ucPllCntlFlag & MPLL_CNTL_FLAG_VCO_MODE_MASK);
			mpll_param->yclk_sel =
				(args.ucPllCntlFlag & MPLL_CNTL_FLAG_BYPASS_DQ_PLL) ? 1 : 0;
			mpll_param->qdr =
				(args.ucPllCntlFlag & MPLL_CNTL_FLAG_QDR_ENABLE) ? 1 : 0;
			mpll_param->half_rate =
				(args.ucPllCntlFlag & MPLL_CNTL_FLAG_AD_HALF_RATE) ? 1 : 0;
			break;
		default:
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

uint32_t amdgpu_atombios_get_engine_clock(struct amdgpu_device *adev)
{
	GET_ENGINE_CLOCK_PS_ALLOCATION args;
	int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock);

	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
	return le32_to_cpu(args.ulReturnEngineClock);
}

uint32_t amdgpu_atombios_get_memory_clock(struct amdgpu_device *adev)
{
	GET_MEMORY_CLOCK_PS_ALLOCATION args;
	int index = GetIndexIntoMasterTable(COMMAND, GetMemoryClock);

	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
	return le32_to_cpu(args.ulReturnMemoryClock);
}

void amdgpu_atombios_set_engine_clock(struct amdgpu_device *adev,
				      uint32_t eng_clock)
{
	SET_ENGINE_CLOCK_PS_ALLOCATION args;
	int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock);

	args.ulTargetEngineClock = cpu_to_le32(eng_clock);	/* 10 khz */

	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
}

void amdgpu_atombios_set_memory_clock(struct amdgpu_device *adev,
				      uint32_t mem_clock)
{
	SET_MEMORY_CLOCK_PS_ALLOCATION args;
	int index = GetIndexIntoMasterTable(COMMAND, SetMemoryClock);

	if (adev->flags & AMD_IS_APU)
		return;

	args.ulTargetMemoryClock = cpu_to_le32(mem_clock);	/* 10 khz */

	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
}

void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
					     u32 eng_clock, u32 mem_clock)
{
	SET_ENGINE_CLOCK_PS_ALLOCATION args;
	int index = GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings);
	u32 tmp;

	memset(&args, 0, sizeof(args));

	tmp = eng_clock & SET_CLOCK_FREQ_MASK;
	tmp |= (COMPUTE_ENGINE_PLL_PARAM << 24);

	args.ulTargetEngineClock = cpu_to_le32(tmp);
	if (mem_clock)
		args.sReserved.ulClock = cpu_to_le32(mem_clock & SET_CLOCK_FREQ_MASK);

	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
}

union set_voltage {
	struct _SET_VOLTAGE_PS_ALLOCATION alloc;
	struct _SET_VOLTAGE_PARAMETERS v1;
	struct _SET_VOLTAGE_PARAMETERS_V2 v2;
	struct _SET_VOLTAGE_PARAMETERS_V1_3 v3;
};

void amdgpu_atombios_set_voltage(struct amdgpu_device *adev,
				 u16 voltage_level,
				 u8 voltage_type)
{
	union set_voltage args;
	int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
	u8 frev, crev, volt_index = voltage_level;

	if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
		return;

	/* 0xff01 is a flag rather than an actual voltage */
	if (voltage_level == 0xff01)
		return;

	switch (crev) {
	case 1:
		args.v1.ucVoltageType = voltage_type;
		args.v1.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE;
		args.v1.ucVoltageIndex = volt_index;
		break;
	case 2:
		args.v2.ucVoltageType = voltage_type;
		args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE;
		args.v2.usVoltageLevel = cpu_to_le16(voltage_level);
		break;
	case 3:
		args.v3.ucVoltageType = voltage_type;
		args.v3.ucVoltageMode = ATOM_SET_VOLTAGE;
		args.v3.usVoltageLevel = cpu_to_le16(voltage_level);
		break;
	default:
		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
		return;
	}

	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);
}

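/* Query the leakage bin id of this chip via the SetVoltage command
 * table in GET_LEAKAGE_ID mode (crev 3+ only).
 */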
int amdgpu_atombios_get_leakage_id_from_vbios(struct amdgpu_device *adev,
					      u16 *leakage_id)
{
	union set_voltage args;
	int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
	u8 frev, crev;

	if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
		return -EINVAL;

	switch (crev) {
	case 3:
	case 4:
		args.v3.ucVoltageType = 0;
		args.v3.ucVoltageMode = ATOM_GET_LEAKAGE_ID;
		args.v3.usVoltageLevel = 0;

		amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);

		*leakage_id = le16_to_cpu(args.v3.usVoltageLevel);
		break;
	default:
		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
		return -EINVAL;
	}

	return 0;
}

int amdgpu_atombios_get_leakage_vddc_based_on_leakage_params(struct amdgpu_device *adev,
							     u16 *vddc, u16 *vddci,
							     u16 virtual_voltage_id,
							     u16 vbios_voltage_id)
{
	int index = GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo);
	u8 frev, crev;
	u16 data_offset, size;
	int i, j;
	ATOM_ASIC_PROFILING_INFO_V2_1 *profile;
	u16 *leakage_bin, *vddc_id_buf, *vddc_buf, *vddci_id_buf, *vddci_buf;

	*vddc = 0;
	*vddci = 0;

	if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
					   &frev, &crev, &data_offset))
		return -EINVAL;

	profile = (ATOM_ASIC_PROFILING_INFO_V2_1 *)
		(adev->mode_info.atom_context->bios + data_offset);

	switch (frev) {
	case 1:
		return -EINVAL;
	case 2:
		switch (crev) {
		case 1:
			if (size < sizeof(ATOM_ASIC_PROFILING_INFO_V2_1))
				return -EINVAL;
			leakage_bin = (u16 *)
				(adev->mode_info.atom_context->bios + data_offset +
				 le16_to_cpu(profile->usLeakageBinArrayOffset));
			vddc_id_buf = (u16 *)
				(adev->mode_info.atom_context->bios + data_offset +
				 le16_to_cpu(profile->usElbVDDC_IdArrayOffset));
			vddc_buf = (u16 *)
				(adev->mode_info.atom_context->bios + data_offset +
				 le16_to_cpu(profile->usElbVDDC_LevelArrayOffset));
			vddci_id_buf = (u16 *)
				(adev->mode_info.atom_context->bios + data_offset +
				 le16_to_cpu(profile->usElbVDDCI_IdArrayOffset));
			vddci_buf = (u16 *)
				(adev->mode_info.atom_context->bios + data_offset +
				 le16_to_cpu(profile->usElbVDDCI_LevelArrayOffset));

			if (profile->ucElbVDDC_Num > 0) {
				for (i = 0; i < profile->ucElbVDDC_Num; i++) {
					if (vddc_id_buf[i] == virtual_voltage_id) {
						for (j = 0; j < profile->ucLeakageBinNum; j++) {
							if (vbios_voltage_id <= leakage_bin[j]) {
								*vddc = vddc_buf[j * profile->ucElbVDDC_Num + i];
								break;
							}
						}
						break;
					}
				}
			}
			if (profile->ucElbVDDCI_Num > 0) {
				for (i = 0; i < profile->ucElbVDDCI_Num; i++) {
					if (vddci_id_buf[i] == virtual_voltage_id) {
						for (j = 0; j < profile->ucLeakageBinNum; j++) {
							if (vbios_voltage_id <= leakage_bin[j]) {
								*vddci = vddci_buf[j * profile->ucElbVDDCI_Num + i];
								break;
							}
						}
						break;
					}
				}
			}
			break;
		default:
			DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
			return -EINVAL;
		}
		break;
	default:
		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
		return -EINVAL;
	}

	return 0;
}

union get_voltage_info {
	struct _GET_VOLTAGE_INFO_INPUT_PARAMETER_V1_2 in;
	struct _GET_EVV_VOLTAGE_INFO_OUTPUT_PARAMETER_V1_2 evv_out;
};

int amdgpu_atombios_get_voltage_evv(struct amdgpu_device *adev,
				    u16 virtual_voltage_id,
				    u16 *voltage)
{
	int index = GetIndexIntoMasterTable(COMMAND, GetVoltageInfo);
	u32 entry_id;
	u32 count = adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count;
	union get_voltage_info args;

	for (entry_id = 0; entry_id < count; entry_id++) {
		if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].v ==
		    virtual_voltage_id)
			break;
	}

	if (entry_id >= count)
		return -EINVAL;

	args.in.ucVoltageType = VOLTAGE_TYPE_VDDC;
	args.in.ucVoltageMode = ATOM_GET_VOLTAGE_EVV_VOLTAGE;
	args.in.usVoltageLevel = cpu_to_le16(virtual_voltage_id);
	args.in.ulSCLKFreq =
		cpu_to_le32(adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[entry_id].clk);

	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args);

	*voltage = le16_to_cpu(args.evv_out.usVoltageLevel);

	return 0;
}

union voltage_object_info {
	struct _ATOM_VOLTAGE_OBJECT_INFO v1;
	struct _ATOM_VOLTAGE_OBJECT_INFO_V2 v2;
	struct _ATOM_VOLTAGE_OBJECT_INFO_V3_1 v3;
};

union voltage_object {
	struct _ATOM_VOLTAGE_OBJECT v1;
	struct _ATOM_VOLTAGE_OBJECT_V2 v2;
	union _ATOM_VOLTAGE_OBJECT_V3 v3;
};

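/* Scan the variable-length voltage object list in a v3 VoltageObjectInfo
 * table for the entry matching the requested type and mode.
 */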
static ATOM_VOLTAGE_OBJECT_V3 *amdgpu_atombios_lookup_voltage_object_v3(ATOM_VOLTAGE_OBJECT_INFO_V3_1 *v3,
									u8 voltage_type, u8 voltage_mode)
{
	u32 size = le16_to_cpu(v3->sHeader.usStructureSize);
	u32 offset = offsetof(ATOM_VOLTAGE_OBJECT_INFO_V3_1, asVoltageObj[0]);
	u8 *start = (u8 *)v3;

	while (offset < size) {
		ATOM_VOLTAGE_OBJECT_V3 *vo = (ATOM_VOLTAGE_OBJECT_V3 *)(start + offset);
		if ((vo->asGpioVoltageObj.sHeader.ucVoltageType == voltage_type) &&
		    (vo->asGpioVoltageObj.sHeader.ucVoltageMode == voltage_mode))
			return vo;
		offset += le16_to_cpu(vo->asGpioVoltageObj.sHeader.usSize);
	}
	return NULL;
}

bool
amdgpu_atombios_is_voltage_gpio(struct amdgpu_device *adev,
				u8 voltage_type, u8 voltage_mode)
{
	int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
	u8 frev, crev;
	u16 data_offset, size;
	union voltage_object_info *voltage_info;

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
					  &frev, &crev, &data_offset)) {
		voltage_info = (union voltage_object_info *)
			(adev->mode_info.atom_context->bios + data_offset);

		switch (frev) {
		case 3:
			switch (crev) {
			case 1:
				if (amdgpu_atombios_lookup_voltage_object_v3(&voltage_info->v3,
									     voltage_type, voltage_mode))
					return true;
				break;
			default:
				DRM_ERROR("unknown voltage object table\n");
				return false;
			}
			break;
		default:
			DRM_ERROR("unknown voltage object table\n");
			return false;
		}

	}
	return false;
}

int amdgpu_atombios_get_voltage_table(struct amdgpu_device *adev,
				      u8 voltage_type, u8 voltage_mode,
				      struct atom_voltage_table *voltage_table)
{
	int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
	u8 frev, crev;
	u16 data_offset, size;
	int i;
	union voltage_object_info *voltage_info;
	union voltage_object *voltage_object = NULL;

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
					  &frev, &crev, &data_offset)) {
		voltage_info = (union voltage_object_info *)
			(adev->mode_info.atom_context->bios + data_offset);

		switch (frev) {
		case 3:
			switch (crev) {
			case 1:
				voltage_object = (union voltage_object *)
					amdgpu_atombios_lookup_voltage_object_v3(&voltage_info->v3,
										 voltage_type, voltage_mode);
				if (voltage_object) {
					ATOM_GPIO_VOLTAGE_OBJECT_V3 *gpio =
						&voltage_object->v3.asGpioVoltageObj;
					VOLTAGE_LUT_ENTRY_V2 *lut;
					if (gpio->ucGpioEntryNum > MAX_VOLTAGE_ENTRIES)
						return -EINVAL;
					lut = &gpio->asVolGpioLut[0];
					for (i = 0; i < gpio->ucGpioEntryNum; i++) {
						voltage_table->entries[i].value =
							le16_to_cpu(lut->usVoltageValue);
						voltage_table->entries[i].smio_low =
							le32_to_cpu(lut->ulVoltageId);
						lut = (VOLTAGE_LUT_ENTRY_V2 *)
							((u8 *)lut + sizeof(VOLTAGE_LUT_ENTRY_V2));
					}
					voltage_table->mask_low = le32_to_cpu(gpio->ulGpioMaskVal);
					voltage_table->count = gpio->ucGpioEntryNum;
					voltage_table->phase_delay = gpio->ucPhaseDelay;
					return 0;
				}
				break;
			default:
				DRM_ERROR("unknown voltage object table\n");
				return -EINVAL;
			}
			break;
		default:
			DRM_ERROR("unknown voltage object table\n");
			return -EINVAL;
		}
	}
	return -EINVAL;
}

union vram_info {
	struct _ATOM_VRAM_INFO_V3 v1_3;
	struct _ATOM_VRAM_INFO_V4 v1_4;
	struct _ATOM_VRAM_INFO_HEADER_V2_1 v2_1;
};

#define MEM_ID_MASK 0xff000000
#define MEM_ID_SHIFT 24
#define CLOCK_RANGE_MASK 0x00ffffff
#define CLOCK_RANGE_SHIFT 0
#define LOW_NIBBLE_MASK 0xf
#define DATA_EQU_PREV 0
#define DATA_FROM_TABLE 4

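/* Extract the memory-controller register init table for one VRAM module
 * from VRAM_Info v2.1: first the list of register offsets, then one data
 * block per memory clock range, where each dword is either taken from the
 * table or copied from the previous register's value.
 */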
int amdgpu_atombios_init_mc_reg_table(struct amdgpu_device *adev,
				      u8 module_index,
				      struct atom_mc_reg_table *reg_table)
{
	int index = GetIndexIntoMasterTable(DATA, VRAM_Info);
	u8 frev, crev, num_entries, t_mem_id, num_ranges = 0;
	u32 i = 0, j;
	u16 data_offset, size;
	union vram_info *vram_info;

	memset(reg_table, 0, sizeof(struct atom_mc_reg_table));

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
					  &frev, &crev, &data_offset)) {
		vram_info = (union vram_info *)
			(adev->mode_info.atom_context->bios + data_offset);
		switch (frev) {
		case 1:
			DRM_ERROR("old table version %d, %d\n", frev, crev);
			return -EINVAL;
		case 2:
			switch (crev) {
			case 1:
				if (module_index < vram_info->v2_1.ucNumOfVRAMModule) {
					ATOM_INIT_REG_BLOCK *reg_block =
						(ATOM_INIT_REG_BLOCK *)
						((u8 *)vram_info + le16_to_cpu(vram_info->v2_1.usMemClkPatchTblOffset));
					ATOM_MEMORY_SETTING_DATA_BLOCK *reg_data =
						(ATOM_MEMORY_SETTING_DATA_BLOCK *)
						((u8 *)reg_block + (2 * sizeof(u16)) +
						 le16_to_cpu(reg_block->usRegIndexTblSize));
					ATOM_INIT_REG_INDEX_FORMAT *format = &reg_block->asRegIndexBuf[0];
					num_entries = (u8)((le16_to_cpu(reg_block->usRegIndexTblSize)) /
							   sizeof(ATOM_INIT_REG_INDEX_FORMAT)) - 1;
					if (num_entries > VBIOS_MC_REGISTER_ARRAY_SIZE)
						return -EINVAL;
					while (i < num_entries) {
						if (format->ucPreRegDataLength & ACCESS_PLACEHOLDER)
							break;
						reg_table->mc_reg_address[i].s1 =
							(u16)(le16_to_cpu(format->usRegIndex));
						reg_table->mc_reg_address[i].pre_reg_data =
							(u8)(format->ucPreRegDataLength);
						i++;
						format = (ATOM_INIT_REG_INDEX_FORMAT *)
							((u8 *)format + sizeof(ATOM_INIT_REG_INDEX_FORMAT));
					}
					reg_table->last = i;
					while ((le32_to_cpu(*(u32 *)reg_data) != END_OF_REG_DATA_BLOCK) &&
					       (num_ranges < VBIOS_MAX_AC_TIMING_ENTRIES)) {
						t_mem_id = (u8)((le32_to_cpu(*(u32 *)reg_data) & MEM_ID_MASK)
								>> MEM_ID_SHIFT);
						if (module_index == t_mem_id) {
							reg_table->mc_reg_table_entry[num_ranges].mclk_max =
								(u32)((le32_to_cpu(*(u32 *)reg_data) & CLOCK_RANGE_MASK)
								      >> CLOCK_RANGE_SHIFT);
							for (i = 0, j = 1; i < reg_table->last; i++) {
								if ((reg_table->mc_reg_address[i].pre_reg_data & LOW_NIBBLE_MASK) == DATA_FROM_TABLE) {
									reg_table->mc_reg_table_entry[num_ranges].mc_data[i] =
										(u32)le32_to_cpu(*((u32 *)reg_data + j));
									j++;
								} else if ((reg_table->mc_reg_address[i].pre_reg_data & LOW_NIBBLE_MASK) == DATA_EQU_PREV) {
									reg_table->mc_reg_table_entry[num_ranges].mc_data[i] =
										reg_table->mc_reg_table_entry[num_ranges].mc_data[i - 1];
								}
							}
							num_ranges++;
						}
						reg_data = (ATOM_MEMORY_SETTING_DATA_BLOCK *)
							((u8 *)reg_data + le16_to_cpu(reg_block->usRegDataBlkSize));
					}
					if (le32_to_cpu(*(u32 *)reg_data) != END_OF_REG_DATA_BLOCK)
						return -EINVAL;
					reg_table->num_entries = num_ranges;
				} else
					return -EINVAL;
				break;
			default:
				DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
				return -EINVAL;
			}
			break;
		default:
			DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
			return -EINVAL;
		}
		return 0;
	}
	return -EINVAL;
}

bool amdgpu_atombios_has_gpu_virtualization_table(struct amdgpu_device *adev)
{
	int index = GetIndexIntoMasterTable(DATA, GPUVirtualizationInfo);
	u8 frev, crev;
	u16 data_offset, size;

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
					  &frev, &crev, &data_offset))
		return true;

	return false;
}

void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock)
{
	uint32_t bios_6_scratch;

	bios_6_scratch = RREG32(mmBIOS_SCRATCH_6);

	if (lock) {
		bios_6_scratch |= ATOM_S6_CRITICAL_STATE;
		bios_6_scratch &= ~ATOM_S6_ACC_MODE;
	} else {
		bios_6_scratch &= ~ATOM_S6_CRITICAL_STATE;
		bios_6_scratch |= ATOM_S6_ACC_MODE;
	}

	WREG32(mmBIOS_SCRATCH_6, bios_6_scratch);
}

void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev)
{
	uint32_t bios_2_scratch, bios_6_scratch;

	bios_2_scratch = RREG32(mmBIOS_SCRATCH_2);
	bios_6_scratch = RREG32(mmBIOS_SCRATCH_6);

	/* let the bios control the backlight */
	bios_2_scratch &= ~ATOM_S2_VRI_BRIGHT_ENABLE;

	/* tell the bios not to handle mode switching */
	bios_6_scratch |= ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH;

	/* clear the vbios dpms state */
	bios_2_scratch &= ~ATOM_S2_DEVICE_DPMS_STATE;

	WREG32(mmBIOS_SCRATCH_2, bios_2_scratch);
	WREG32(mmBIOS_SCRATCH_6, bios_6_scratch);
}

void amdgpu_atombios_scratch_regs_save(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++)
		adev->bios_scratch[i] = RREG32(mmBIOS_SCRATCH_0 + i);
}

void amdgpu_atombios_scratch_regs_restore(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < AMDGPU_BIOS_NUM_SCRATCH; i++)
		WREG32(mmBIOS_SCRATCH_0 + i, adev->bios_scratch[i]);
}

/* Atom needs data in little endian format
 * so swap as appropriate when copying data to
 * or from atom. Note that atom operates on
 * dw units.
 */
void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
{
#ifdef __BIG_ENDIAN
	u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */
	u32 *dst32, *src32;
	int i;

	memcpy(src_tmp, src, num_bytes);
	src32 = (u32 *)src_tmp;
	dst32 = (u32 *)dst_tmp;
	if (to_le) {
		for (i = 0; i < ((num_bytes + 3) / 4); i++)
			dst32[i] = cpu_to_le32(src32[i]);
		memcpy(dst, dst_tmp, num_bytes);
	} else {
		u8 dws = num_bytes & ~3;
		for (i = 0; i < ((num_bytes + 3) / 4); i++)
			dst32[i] = le32_to_cpu(src32[i]);
		memcpy(dst, dst_tmp, dws);
		if (num_bytes % 4) {
			for (i = 0; i < (num_bytes % 4); i++)
				dst[dws+i] = dst_tmp[dws+i];
		}
	}
#else
	memcpy(dst, src, num_bytes);
#endif
}