/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "vid.h"
#include "uvd/uvd_6_0_d.h"
#include "uvd/uvd_6_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v6_0_start(struct amdgpu_device *adev);
static void uvd_v6_0_stop(struct amdgpu_device *adev);

/**
 * uvd_v6_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint32_t uvd_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v6_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint32_t uvd_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v6_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v6_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);
}

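/**
 * uvd_v6_0_early_init - set ring and irq function pointers
 *
 * @handle: amdgpu_device pointer
 *
 * Set the ring and irq function pointers for the UVD block
 */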
static int uvd_v6_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v6_0_set_ring_funcs(adev);
	uvd_v6_0_set_irq_funcs(adev);

	return 0;
}

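/**
 * uvd_v6_0_sw_init - sw init for UVD block
 *
 * @handle: amdgpu_device pointer
 *
 * Register the UVD trap interrupt, load the UVD firmware and
 * initialize the UVD ring
 */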
static int uvd_v6_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* UVD TRAP */
	r = amdgpu_irq_add_id(adev, 124, &adev->uvd.irq);
	if (r)
		return r;

	r = amdgpu_uvd_sw_init(adev);
	if (r)
		return r;

	r = amdgpu_uvd_resume(adev);
	if (r)
		return r;

	ring = &adev->uvd.ring;
	sprintf(ring->name, "uvd");
	r = amdgpu_ring_init(adev, ring, 4096, CP_PACKET2, 0xf,
			     &adev->uvd.irq, 0, AMDGPU_RING_TYPE_UVD);

	return r;
}

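/**
 * uvd_v6_0_sw_fini - sw fini for UVD block
 *
 * @handle: amdgpu_device pointer
 *
 * Suspend the UVD block and tear down the software state
 */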
static int uvd_v6_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_uvd_suspend(adev);
	if (r)
		return r;

	r = amdgpu_uvd_sw_fini(adev);
	if (r)
		return r;

	return r;
}

/**
 * uvd_v6_0_hw_init - start and test UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v6_0_hw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t tmp;
	int r;

	r = uvd_v6_0_start(adev);
	if (r)
		goto done;

	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		goto done;
	}

	r = amdgpu_ring_lock(ring, 10);
	if (r) {
		DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
		goto done;
	}

	tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
	amdgpu_ring_write(ring, tmp);
	amdgpu_ring_write(ring, 0xFFFFF);

	/* Clear timeout status bits */
	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
	amdgpu_ring_write(ring, 0x8);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
	amdgpu_ring_write(ring, 3);

	amdgpu_ring_unlock_commit(ring);

done:
	if (!r)
		DRM_INFO("UVD initialized successfully.\n");

	return r;
}

/**
 * uvd_v6_0_hw_fini - stop the hardware block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block, mark ring as not ready any more
 */
static int uvd_v6_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ring *ring = &adev->uvd.ring;

	uvd_v6_0_stop(adev);
	ring->ready = false;

	return 0;
}

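/**
 * uvd_v6_0_suspend - suspend the UVD block
 *
 * @handle: amdgpu_device pointer
 *
 * Suspend the UVD firmware state and stop the hardware block
 */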
static int uvd_v6_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* Skip this for APU for now */
	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_uvd_suspend(adev);
		if (r)
			return r;
	}

	r = uvd_v6_0_hw_fini(adev);
	if (r)
		return r;

	return r;
}

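/**
 * uvd_v6_0_resume - resume the UVD block
 *
 * @handle: amdgpu_device pointer
 *
 * Restore the UVD firmware state and re-initialize the hardware block
 */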
static int uvd_v6_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* Skip this for APU for now */
	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_uvd_resume(adev);
		if (r)
			return r;
	}

	r = uvd_v6_0_hw_init(adev);
	if (r)
		return r;

	return r;
}

/**
 * uvd_v6_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v6_0_mc_resume(struct amdgpu_device *adev)
{
	uint64_t offset;
	uint32_t size;

	/* program memory controller bits 0-27 */
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
			lower_32_bits(adev->uvd.gpu_addr));
	WREG32(mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
			upper_32_bits(adev->uvd.gpu_addr));

	offset = AMDGPU_UVD_FIRMWARE_OFFSET;
	size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
	WREG32(mmUVD_VCPU_CACHE_OFFSET0, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

	offset += size;
	size = AMDGPU_UVD_STACK_SIZE;
	WREG32(mmUVD_VCPU_CACHE_OFFSET1, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

	offset += size;
	size = AMDGPU_UVD_HEAP_SIZE;
	WREG32(mmUVD_VCPU_CACHE_OFFSET2, offset >> 3);
	WREG32(mmUVD_VCPU_CACHE_SIZE2, size);
}

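/**
 * cz_set_uvd_clock_gating_branches - enable/disable UVD clock gating branches
 *
 * @adev: amdgpu_device pointer
 * @enable: true to gate the UVD and SUVD sub-blocks, false to un-gate them
 *
 * Select the sub-blocks that take part in UVD clock gating (APU path)
 */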
static void cz_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
		bool enable)
{
	u32 data, data1;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	if (enable) {
		data |= UVD_CGC_GATE__SYS_MASK |
				UVD_CGC_GATE__UDEC_MASK |
				UVD_CGC_GATE__MPEG2_MASK |
				UVD_CGC_GATE__RBC_MASK |
				UVD_CGC_GATE__LMI_MC_MASK |
				UVD_CGC_GATE__IDCT_MASK |
				UVD_CGC_GATE__MPRD_MASK |
				UVD_CGC_GATE__MPC_MASK |
				UVD_CGC_GATE__LBSI_MASK |
				UVD_CGC_GATE__LRBBM_MASK |
				UVD_CGC_GATE__UDEC_RE_MASK |
				UVD_CGC_GATE__UDEC_CM_MASK |
				UVD_CGC_GATE__UDEC_IT_MASK |
				UVD_CGC_GATE__UDEC_DB_MASK |
				UVD_CGC_GATE__UDEC_MP_MASK |
				UVD_CGC_GATE__WCB_MASK |
				UVD_CGC_GATE__VCPU_MASK |
				UVD_CGC_GATE__SCPU_MASK;
		data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
				UVD_SUVD_CGC_GATE__SIT_MASK |
				UVD_SUVD_CGC_GATE__SMP_MASK |
				UVD_SUVD_CGC_GATE__SCM_MASK |
				UVD_SUVD_CGC_GATE__SDB_MASK |
				UVD_SUVD_CGC_GATE__SRE_H264_MASK |
				UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SIT_H264_MASK |
				UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SCM_H264_MASK |
				UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SDB_H264_MASK |
				UVD_SUVD_CGC_GATE__SDB_HEVC_MASK;
	} else {
		data &= ~(UVD_CGC_GATE__SYS_MASK |
				UVD_CGC_GATE__UDEC_MASK |
				UVD_CGC_GATE__MPEG2_MASK |
				UVD_CGC_GATE__RBC_MASK |
				UVD_CGC_GATE__LMI_MC_MASK |
				UVD_CGC_GATE__LMI_UMC_MASK |
				UVD_CGC_GATE__IDCT_MASK |
				UVD_CGC_GATE__MPRD_MASK |
				UVD_CGC_GATE__MPC_MASK |
				UVD_CGC_GATE__LBSI_MASK |
				UVD_CGC_GATE__LRBBM_MASK |
				UVD_CGC_GATE__UDEC_RE_MASK |
				UVD_CGC_GATE__UDEC_CM_MASK |
				UVD_CGC_GATE__UDEC_IT_MASK |
				UVD_CGC_GATE__UDEC_DB_MASK |
				UVD_CGC_GATE__UDEC_MP_MASK |
				UVD_CGC_GATE__WCB_MASK |
				UVD_CGC_GATE__VCPU_MASK |
				UVD_CGC_GATE__SCPU_MASK);
		data1 &= ~(UVD_SUVD_CGC_GATE__SRE_MASK |
				UVD_SUVD_CGC_GATE__SIT_MASK |
				UVD_SUVD_CGC_GATE__SMP_MASK |
				UVD_SUVD_CGC_GATE__SCM_MASK |
				UVD_SUVD_CGC_GATE__SDB_MASK |
				UVD_SUVD_CGC_GATE__SRE_H264_MASK |
				UVD_SUVD_CGC_GATE__SRE_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SIT_H264_MASK |
				UVD_SUVD_CGC_GATE__SIT_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SCM_H264_MASK |
				UVD_SUVD_CGC_GATE__SCM_HEVC_MASK |
				UVD_SUVD_CGC_GATE__SDB_H264_MASK |
				UVD_SUVD_CGC_GATE__SDB_HEVC_MASK);
	}
	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}

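/**
 * tonga_set_uvd_clock_gating_branches - enable/disable UVD clock gating branches
 *
 * @adev: amdgpu_device pointer
 * @enable: true to gate the UVD and SUVD sub-blocks, false to un-gate them
 *
 * Select the sub-blocks that take part in UVD clock gating (dGPU path)
 */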
static void tonga_set_uvd_clock_gating_branches(struct amdgpu_device *adev,
		bool enable)
{
	u32 data, data1;

	data = RREG32(mmUVD_CGC_GATE);
	data1 = RREG32(mmUVD_SUVD_CGC_GATE);
	if (enable) {
		data |= UVD_CGC_GATE__SYS_MASK |
				UVD_CGC_GATE__UDEC_MASK |
				UVD_CGC_GATE__MPEG2_MASK |
				UVD_CGC_GATE__RBC_MASK |
				UVD_CGC_GATE__LMI_MC_MASK |
				UVD_CGC_GATE__IDCT_MASK |
				UVD_CGC_GATE__MPRD_MASK |
				UVD_CGC_GATE__MPC_MASK |
				UVD_CGC_GATE__LBSI_MASK |
				UVD_CGC_GATE__LRBBM_MASK |
				UVD_CGC_GATE__UDEC_RE_MASK |
				UVD_CGC_GATE__UDEC_CM_MASK |
				UVD_CGC_GATE__UDEC_IT_MASK |
				UVD_CGC_GATE__UDEC_DB_MASK |
				UVD_CGC_GATE__UDEC_MP_MASK |
				UVD_CGC_GATE__WCB_MASK |
				UVD_CGC_GATE__VCPU_MASK |
				UVD_CGC_GATE__SCPU_MASK;
		data1 |= UVD_SUVD_CGC_GATE__SRE_MASK |
				UVD_SUVD_CGC_GATE__SIT_MASK |
				UVD_SUVD_CGC_GATE__SMP_MASK |
				UVD_SUVD_CGC_GATE__SCM_MASK |
				UVD_SUVD_CGC_GATE__SDB_MASK;
	} else {
		data &= ~(UVD_CGC_GATE__SYS_MASK |
				UVD_CGC_GATE__UDEC_MASK |
				UVD_CGC_GATE__MPEG2_MASK |
				UVD_CGC_GATE__RBC_MASK |
				UVD_CGC_GATE__LMI_MC_MASK |
				UVD_CGC_GATE__LMI_UMC_MASK |
				UVD_CGC_GATE__IDCT_MASK |
				UVD_CGC_GATE__MPRD_MASK |
				UVD_CGC_GATE__MPC_MASK |
				UVD_CGC_GATE__LBSI_MASK |
				UVD_CGC_GATE__LRBBM_MASK |
				UVD_CGC_GATE__UDEC_RE_MASK |
				UVD_CGC_GATE__UDEC_CM_MASK |
				UVD_CGC_GATE__UDEC_IT_MASK |
				UVD_CGC_GATE__UDEC_DB_MASK |
				UVD_CGC_GATE__UDEC_MP_MASK |
				UVD_CGC_GATE__WCB_MASK |
				UVD_CGC_GATE__VCPU_MASK |
				UVD_CGC_GATE__SCPU_MASK);
		data1 &= ~(UVD_SUVD_CGC_GATE__SRE_MASK |
				UVD_SUVD_CGC_GATE__SIT_MASK |
				UVD_SUVD_CGC_GATE__SMP_MASK |
				UVD_SUVD_CGC_GATE__SCM_MASK |
				UVD_SUVD_CGC_GATE__SDB_MASK);
	}
	WREG32(mmUVD_CGC_GATE, data);
	WREG32(mmUVD_SUVD_CGC_GATE, data1);
}

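/**
 * uvd_v6_0_set_uvd_dynamic_clock_mode - set the UVD clock gating mode
 *
 * @adev: amdgpu_device pointer
 * @swmode: true selects dynamic (software controlled) clock gating
 *
 * Program UVD_CGC_CTRL and UVD_SUVD_CGC_CTRL for the requested clocking mode
 */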
static void uvd_v6_0_set_uvd_dynamic_clock_mode(struct amdgpu_device *adev,
		bool swmode)
{
	u32 data, data1 = 0, data2;

	/* Always un-gate UVD REGS bit */
	data = RREG32(mmUVD_CGC_GATE);
	data &= ~(UVD_CGC_GATE__REGS_MASK);
	WREG32(mmUVD_CGC_GATE, data);

	data = RREG32(mmUVD_CGC_CTRL);
	data &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK |
			UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
	data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
			1 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_GATE_DLY_TIMER) |
			4 << REG_FIELD_SHIFT(UVD_CGC_CTRL, CLK_OFF_DELAY);

	data2 = RREG32(mmUVD_SUVD_CGC_CTRL);
	if (swmode) {
		data &= ~(UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
				UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
				UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
				UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
				UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
				UVD_CGC_CTRL__SYS_MODE_MASK |
				UVD_CGC_CTRL__UDEC_MODE_MASK |
				UVD_CGC_CTRL__MPEG2_MODE_MASK |
				UVD_CGC_CTRL__REGS_MODE_MASK |
				UVD_CGC_CTRL__RBC_MODE_MASK |
				UVD_CGC_CTRL__LMI_MC_MODE_MASK |
				UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
				UVD_CGC_CTRL__IDCT_MODE_MASK |
				UVD_CGC_CTRL__MPRD_MODE_MASK |
				UVD_CGC_CTRL__MPC_MODE_MASK |
				UVD_CGC_CTRL__LBSI_MODE_MASK |
				UVD_CGC_CTRL__LRBBM_MODE_MASK |
				UVD_CGC_CTRL__WCB_MODE_MASK |
				UVD_CGC_CTRL__VCPU_MODE_MASK |
				UVD_CGC_CTRL__JPEG_MODE_MASK |
				UVD_CGC_CTRL__SCPU_MODE_MASK);
		data1 |= UVD_CGC_CTRL2__DYN_OCLK_RAMP_EN_MASK |
				UVD_CGC_CTRL2__DYN_RCLK_RAMP_EN_MASK;
		data1 &= ~UVD_CGC_CTRL2__GATER_DIV_ID_MASK;
		data1 |= 7 << REG_FIELD_SHIFT(UVD_CGC_CTRL2, GATER_DIV_ID);
		data2 &= ~(UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
				UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
				UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
				UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
				UVD_SUVD_CGC_CTRL__SDB_MODE_MASK);
	} else {
		data |= UVD_CGC_CTRL__UDEC_RE_MODE_MASK |
				UVD_CGC_CTRL__UDEC_CM_MODE_MASK |
				UVD_CGC_CTRL__UDEC_IT_MODE_MASK |
				UVD_CGC_CTRL__UDEC_DB_MODE_MASK |
				UVD_CGC_CTRL__UDEC_MP_MODE_MASK |
				UVD_CGC_CTRL__SYS_MODE_MASK |
				UVD_CGC_CTRL__UDEC_MODE_MASK |
				UVD_CGC_CTRL__MPEG2_MODE_MASK |
				UVD_CGC_CTRL__REGS_MODE_MASK |
				UVD_CGC_CTRL__RBC_MODE_MASK |
				UVD_CGC_CTRL__LMI_MC_MODE_MASK |
				UVD_CGC_CTRL__LMI_UMC_MODE_MASK |
				UVD_CGC_CTRL__IDCT_MODE_MASK |
				UVD_CGC_CTRL__MPRD_MODE_MASK |
				UVD_CGC_CTRL__MPC_MODE_MASK |
				UVD_CGC_CTRL__LBSI_MODE_MASK |
				UVD_CGC_CTRL__LRBBM_MODE_MASK |
				UVD_CGC_CTRL__WCB_MODE_MASK |
				UVD_CGC_CTRL__VCPU_MODE_MASK |
				UVD_CGC_CTRL__SCPU_MODE_MASK;
		data2 |= UVD_SUVD_CGC_CTRL__SRE_MODE_MASK |
				UVD_SUVD_CGC_CTRL__SIT_MODE_MASK |
				UVD_SUVD_CGC_CTRL__SMP_MODE_MASK |
				UVD_SUVD_CGC_CTRL__SCM_MODE_MASK |
				UVD_SUVD_CGC_CTRL__SDB_MODE_MASK;
	}
	WREG32(mmUVD_CGC_CTRL, data);
	WREG32(mmUVD_SUVD_CGC_CTRL, data2);

	data = RREG32_UVD_CTX(ixUVD_CGC_CTRL2);
	data &= ~(REG_FIELD_MASK(UVD_CGC_CTRL2, DYN_OCLK_RAMP_EN) |
			REG_FIELD_MASK(UVD_CGC_CTRL2, DYN_RCLK_RAMP_EN) |
			REG_FIELD_MASK(UVD_CGC_CTRL2, GATER_DIV_ID));
	data1 &= (REG_FIELD_MASK(UVD_CGC_CTRL2, DYN_OCLK_RAMP_EN) |
			REG_FIELD_MASK(UVD_CGC_CTRL2, DYN_RCLK_RAMP_EN) |
			REG_FIELD_MASK(UVD_CGC_CTRL2, GATER_DIV_ID));
	data |= data1;
	WREG32_UVD_CTX(ixUVD_CGC_CTRL2, data);
}

/**
 * uvd_v6_0_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the UVD block
 */
static int uvd_v6_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->uvd.ring;
	uint32_t rb_bufsz, tmp;
	uint32_t lmi_swap_cntl;
	uint32_t mp_swap_cntl;
	int i, j, r;

	/* disable DPG */
	WREG32_P(mmUVD_POWER_STATUS, 0, ~(1 << 2));

	/* disable byte swapping */
	lmi_swap_cntl = 0;
	mp_swap_cntl = 0;

	uvd_v6_0_mc_resume(adev);

	/* Set dynamic clock gating in S/W control mode */
	if (adev->cg_flags & AMDGPU_CG_SUPPORT_UVD_MGCG) {
		if (adev->flags & AMD_IS_APU)
			cz_set_uvd_clock_gating_branches(adev, false);
		else
			tonga_set_uvd_clock_gating_branches(adev, false);
		uvd_v6_0_set_uvd_dynamic_clock_mode(adev, true);
	} else {
		/* disable clock gating */
		uint32_t data = RREG32(mmUVD_CGC_CTRL);

		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		WREG32(mmUVD_CGC_CTRL, data);
	}

	/* disable interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));

	/* stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK |
		UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK |
		UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK |
		UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);
	mdelay(5);

	/* take UVD block out of reset */
	WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	/* initialize UVD memory controller */
	WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
			     (1 << 21) | (1 << 9) | (1 << 20));

#ifdef __BIG_ENDIAN
	/* swap (8 in 32) RB and IB */
	lmi_swap_cntl = 0xa;
	mp_swap_cntl = 0;
#endif
	WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
	WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

	WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
	WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
	WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
	WREG32(mmUVD_MPC_SET_ALU, 0);
	WREG32(mmUVD_MPC_SET_MUX, 0x88);

	/* take all subblocks out of reset, except VCPU */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* enable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 1 << 9);

	/* enable UMC */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

	/* boot up the VCPU */
	WREG32(mmUVD_SOFT_RESET, 0);
	mdelay(10);

	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32(mmUVD_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
		WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
				~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(mmUVD_SOFT_RESET, 0,
				~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	if (r) {
		DRM_ERROR("UVD not responding, giving up!!!\n");
		return r;
	}

	/* enable master interrupt */
	WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1));

	/* clear the bit 4 of UVD_STATUS */
	WREG32_P(mmUVD_STATUS, 0, ~(2 << 1));

	rb_bufsz = order_base_2(ring->ring_size);
	tmp = 0;
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BUFSZ, rb_bufsz);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_WPTR_POLL_EN, 0);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
	tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, tmp);

	/* set the write pointer delay */
	WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

	/* set the wb address */
	WREG32(mmUVD_RBC_RB_RPTR_ADDR, (upper_32_bits(ring->gpu_addr) >> 2));

	/* program the RB_BASE for ring buffer */
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_LOW,
			lower_32_bits(ring->gpu_addr));
	WREG32(mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH,
			upper_32_bits(ring->gpu_addr));

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmUVD_RBC_RB_RPTR, 0);

	ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
	WREG32(mmUVD_RBC_RB_WPTR, ring->wptr);

	WREG32_P(mmUVD_RBC_RB_CNTL, 0, ~UVD_RBC_RB_CNTL__RB_NO_FETCH_MASK);

	return 0;
}

/**
 * uvd_v6_0_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block
 */
static void uvd_v6_0_stop(struct amdgpu_device *adev)
{
	/* force RBC into idle state */
	WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
	mdelay(1);

	/* put VCPU into reset */
	WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
	mdelay(5);

	/* disable VCPU clock */
	WREG32(mmUVD_VCPU_CNTL, 0x0);

	/* Unstall UMC and register bus */
	WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
}

/**
 * uvd_v6_0_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @fence: fence to emit
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				     unsigned flags)
{
	WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, addr & 0xffffffff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
	amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v6_0_ring_emit_semaphore - emit semaphore command
 *
 * @ring: amdgpu_ring pointer
 * @semaphore: semaphore to emit commands for
 * @emit_wait: true if we should emit a wait command
 *
 * Emit a semaphore command (either wait or signal) to the UVD ring.
 */
static bool uvd_v6_0_ring_emit_semaphore(struct amdgpu_ring *ring,
					 struct amdgpu_semaphore *semaphore,
					 bool emit_wait)
{
	uint64_t addr = semaphore->gpu_addr;

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_LOW, 0));
	amdgpu_ring_write(ring, (addr >> 3) & 0x000FFFFF);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_ADDR_HIGH, 0));
	amdgpu_ring_write(ring, (addr >> 23) & 0x000FFFFF);

	amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CMD, 0));
	amdgpu_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));

	return true;
}

/**
 * uvd_v6_0_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v6_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
	r = amdgpu_ring_lock(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n",
			  ring->idx, r);
		return r;
	}
	amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_unlock_commit(ring);
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmUVD_CONTEXT_ID);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n",
			 ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	return r;
}

/**
 * uvd_v6_0_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @ib: indirect buffer to execute
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_ib *ib)
{
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_LOW, 0));
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_LMI_RBC_IB_64BIT_BAR_HIGH, 0));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
	amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * uvd_v6_0_ring_test_ib - test ib execution
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully execute an IB
 */
static int uvd_v6_0_ring_test_ib(struct amdgpu_ring *ring)
{
	struct fence *fence = NULL;
	int r;

	r = amdgpu_uvd_get_create_msg(ring, 1, NULL);
	if (r) {
		DRM_ERROR("amdgpu: failed to get create msg (%d).\n", r);
		goto error;
	}

	r = amdgpu_uvd_get_destroy_msg(ring, 1, &fence);
	if (r) {
		DRM_ERROR("amdgpu: failed to get destroy ib (%d).\n", r);
		goto error;
	}

	r = fence_wait(fence, false);
	if (r) {
		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
		goto error;
	}

	DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
error:
	fence_put(fence);
	return r;
}

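/**
 * uvd_v6_0_is_idle - check UVD block idle status
 *
 * @handle: amdgpu_device pointer
 *
 * Check SRBM_STATUS to see whether the UVD block is idle
 */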
static bool uvd_v6_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

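/**
 * uvd_v6_0_wait_for_idle - wait for the UVD block to become idle
 *
 * @handle: amdgpu_device pointer
 *
 * Poll SRBM_STATUS until the UVD block goes idle or the timeout expires
 */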
static int uvd_v6_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
			return 0;
	}
	return -ETIMEDOUT;
}

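/**
 * uvd_v6_0_soft_reset - soft reset the UVD block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop UVD, toggle the UVD soft reset bit in SRBM and restart the block
 */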
static int uvd_v6_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	uvd_v6_0_stop(adev);

	WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
			~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
	mdelay(5);

	return uvd_v6_0_start(adev);
}

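/**
 * uvd_v6_0_print_status - dump the UVD register state
 *
 * @handle: amdgpu_device pointer
 *
 * Dump the UVD 6.0 registers for debugging
 */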
static void uvd_v6_0_print_status(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	dev_info(adev->dev, "UVD 6.0 registers\n");
	dev_info(adev->dev, "  UVD_SEMA_ADDR_LOW=0x%08X\n",
		 RREG32(mmUVD_SEMA_ADDR_LOW));
	dev_info(adev->dev, "  UVD_SEMA_ADDR_HIGH=0x%08X\n",
		 RREG32(mmUVD_SEMA_ADDR_HIGH));
	dev_info(adev->dev, "  UVD_SEMA_CMD=0x%08X\n",
		 RREG32(mmUVD_SEMA_CMD));
	dev_info(adev->dev, "  UVD_GPCOM_VCPU_CMD=0x%08X\n",
		 RREG32(mmUVD_GPCOM_VCPU_CMD));
	dev_info(adev->dev, "  UVD_GPCOM_VCPU_DATA0=0x%08X\n",
		 RREG32(mmUVD_GPCOM_VCPU_DATA0));
	dev_info(adev->dev, "  UVD_GPCOM_VCPU_DATA1=0x%08X\n",
		 RREG32(mmUVD_GPCOM_VCPU_DATA1));
	dev_info(adev->dev, "  UVD_ENGINE_CNTL=0x%08X\n",
		 RREG32(mmUVD_ENGINE_CNTL));
	dev_info(adev->dev, "  UVD_UDEC_ADDR_CONFIG=0x%08X\n",
		 RREG32(mmUVD_UDEC_ADDR_CONFIG));
	dev_info(adev->dev, "  UVD_UDEC_DB_ADDR_CONFIG=0x%08X\n",
		 RREG32(mmUVD_UDEC_DB_ADDR_CONFIG));
	dev_info(adev->dev, "  UVD_UDEC_DBW_ADDR_CONFIG=0x%08X\n",
		 RREG32(mmUVD_UDEC_DBW_ADDR_CONFIG));
	dev_info(adev->dev, "  UVD_SEMA_CNTL=0x%08X\n",
		 RREG32(mmUVD_SEMA_CNTL));
	dev_info(adev->dev, "  UVD_LMI_EXT40_ADDR=0x%08X\n",
		 RREG32(mmUVD_LMI_EXT40_ADDR));
	dev_info(adev->dev, "  UVD_CTX_INDEX=0x%08X\n",
		 RREG32(mmUVD_CTX_INDEX));
	dev_info(adev->dev, "  UVD_CTX_DATA=0x%08X\n",
		 RREG32(mmUVD_CTX_DATA));
	dev_info(adev->dev, "  UVD_CGC_GATE=0x%08X\n",
		 RREG32(mmUVD_CGC_GATE));
	dev_info(adev->dev, "  UVD_CGC_CTRL=0x%08X\n",
		 RREG32(mmUVD_CGC_CTRL));
	dev_info(adev->dev, "  UVD_LMI_CTRL2=0x%08X\n",
		 RREG32(mmUVD_LMI_CTRL2));
	dev_info(adev->dev, "  UVD_MASTINT_EN=0x%08X\n",
		 RREG32(mmUVD_MASTINT_EN));
	dev_info(adev->dev, "  UVD_LMI_ADDR_EXT=0x%08X\n",
		 RREG32(mmUVD_LMI_ADDR_EXT));
	dev_info(adev->dev, "  UVD_LMI_CTRL=0x%08X\n",
		 RREG32(mmUVD_LMI_CTRL));
	dev_info(adev->dev, "  UVD_LMI_SWAP_CNTL=0x%08X\n",
		 RREG32(mmUVD_LMI_SWAP_CNTL));
	dev_info(adev->dev, "  UVD_MP_SWAP_CNTL=0x%08X\n",
		 RREG32(mmUVD_MP_SWAP_CNTL));
	dev_info(adev->dev, "  UVD_MPC_SET_MUXA0=0x%08X\n",
		 RREG32(mmUVD_MPC_SET_MUXA0));
	dev_info(adev->dev, "  UVD_MPC_SET_MUXA1=0x%08X\n",
		 RREG32(mmUVD_MPC_SET_MUXA1));
	dev_info(adev->dev, "  UVD_MPC_SET_MUXB0=0x%08X\n",
		 RREG32(mmUVD_MPC_SET_MUXB0));
	dev_info(adev->dev, "  UVD_MPC_SET_MUXB1=0x%08X\n",
		 RREG32(mmUVD_MPC_SET_MUXB1));
	dev_info(adev->dev, "  UVD_MPC_SET_MUX=0x%08X\n",
		 RREG32(mmUVD_MPC_SET_MUX));
	dev_info(adev->dev, "  UVD_MPC_SET_ALU=0x%08X\n",
		 RREG32(mmUVD_MPC_SET_ALU));
	dev_info(adev->dev, "  UVD_VCPU_CACHE_OFFSET0=0x%08X\n",
		 RREG32(mmUVD_VCPU_CACHE_OFFSET0));
	dev_info(adev->dev, "  UVD_VCPU_CACHE_SIZE0=0x%08X\n",
		 RREG32(mmUVD_VCPU_CACHE_SIZE0));
	dev_info(adev->dev, "  UVD_VCPU_CACHE_OFFSET1=0x%08X\n",
		 RREG32(mmUVD_VCPU_CACHE_OFFSET1));
	dev_info(adev->dev, "  UVD_VCPU_CACHE_SIZE1=0x%08X\n",
		 RREG32(mmUVD_VCPU_CACHE_SIZE1));
	dev_info(adev->dev, "  UVD_VCPU_CACHE_OFFSET2=0x%08X\n",
		 RREG32(mmUVD_VCPU_CACHE_OFFSET2));
	dev_info(adev->dev, "  UVD_VCPU_CACHE_SIZE2=0x%08X\n",
		 RREG32(mmUVD_VCPU_CACHE_SIZE2));
	dev_info(adev->dev, "  UVD_VCPU_CNTL=0x%08X\n",
		 RREG32(mmUVD_VCPU_CNTL));
	dev_info(adev->dev, "  UVD_SOFT_RESET=0x%08X\n",
		 RREG32(mmUVD_SOFT_RESET));
	dev_info(adev->dev, "  UVD_RBC_IB_SIZE=0x%08X\n",
		 RREG32(mmUVD_RBC_IB_SIZE));
	dev_info(adev->dev, "  UVD_RBC_RB_RPTR=0x%08X\n",
		 RREG32(mmUVD_RBC_RB_RPTR));
	dev_info(adev->dev, "  UVD_RBC_RB_WPTR=0x%08X\n",
		 RREG32(mmUVD_RBC_RB_WPTR));
	dev_info(adev->dev, "  UVD_RBC_RB_WPTR_CNTL=0x%08X\n",
		 RREG32(mmUVD_RBC_RB_WPTR_CNTL));
	dev_info(adev->dev, "  UVD_RBC_RB_CNTL=0x%08X\n",
		 RREG32(mmUVD_RBC_RB_CNTL));
	dev_info(adev->dev, "  UVD_STATUS=0x%08X\n",
		 RREG32(mmUVD_STATUS));
	dev_info(adev->dev, "  UVD_SEMA_TIMEOUT_STATUS=0x%08X\n",
		 RREG32(mmUVD_SEMA_TIMEOUT_STATUS));
	dev_info(adev->dev, "  UVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL=0x%08X\n",
		 RREG32(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL));
	dev_info(adev->dev, "  UVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL=0x%08X\n",
		 RREG32(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL));
	dev_info(adev->dev, "  UVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL=0x%08X\n",
		 RREG32(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL));
	dev_info(adev->dev, "  UVD_CONTEXT_ID=0x%08X\n",
		 RREG32(mmUVD_CONTEXT_ID));
}

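/**
 * uvd_v6_0_set_interrupt_state - set UVD interrupt state
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @type: interrupt type
 * @state: requested interrupt state
 *
 * No per-type enable is needed for the UVD trap, so this is a no-op
 */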
static int uvd_v6_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	return 0;
}

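/**
 * uvd_v6_0_process_interrupt - process a UVD interrupt
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt source
 * @entry: interrupt vector entry
 *
 * Handle the UVD trap interrupt by processing the UVD ring fences
 */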
static int uvd_v6_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: UVD TRAP\n");
	amdgpu_fence_process(&adev->uvd.ring);
	return 0;
}

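/**
 * uvd_v6_0_set_clockgating_state - enable/disable UVD clock gating
 *
 * @handle: amdgpu_device pointer
 * @state: clock gating state to program
 *
 * Enable or disable UVD medium grain clock gating when supported
 */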
static int uvd_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	bool enable = (state == AMD_CG_STATE_GATE) ? true : false;

	if (!(adev->cg_flags & AMDGPU_CG_SUPPORT_UVD_MGCG))
		return 0;

	if (enable) {
		if (adev->flags & AMD_IS_APU)
			cz_set_uvd_clock_gating_branches(adev, enable);
		else
			tonga_set_uvd_clock_gating_branches(adev, enable);
		uvd_v6_0_set_uvd_dynamic_clock_mode(adev, true);
	} else {
		/* disable clock gating */
		uint32_t data = RREG32(mmUVD_CGC_CTRL);

		data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
		WREG32(mmUVD_CGC_CTRL, data);
	}

	return 0;
}

static int uvd_v6_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the UVD block.
	 * That's done in the dpm code via the SMC. This
	 * just re-inits the block as necessary. The actual
	 * gating still happens in the dpm code. We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_PG_STATE_GATE) {
		uvd_v6_0_stop(adev);
		return 0;
	} else {
		return uvd_v6_0_start(adev);
	}
}

const struct amd_ip_funcs uvd_v6_0_ip_funcs = {
	.early_init = uvd_v6_0_early_init,
	.late_init = NULL,
	.sw_init = uvd_v6_0_sw_init,
	.sw_fini = uvd_v6_0_sw_fini,
	.hw_init = uvd_v6_0_hw_init,
	.hw_fini = uvd_v6_0_hw_fini,
	.suspend = uvd_v6_0_suspend,
	.resume = uvd_v6_0_resume,
	.is_idle = uvd_v6_0_is_idle,
	.wait_for_idle = uvd_v6_0_wait_for_idle,
	.soft_reset = uvd_v6_0_soft_reset,
	.print_status = uvd_v6_0_print_status,
	.set_clockgating_state = uvd_v6_0_set_clockgating_state,
	.set_powergating_state = uvd_v6_0_set_powergating_state,
};

static const struct amdgpu_ring_funcs uvd_v6_0_ring_funcs = {
	.get_rptr = uvd_v6_0_ring_get_rptr,
	.get_wptr = uvd_v6_0_ring_get_wptr,
	.set_wptr = uvd_v6_0_ring_set_wptr,
	.parse_cs = amdgpu_uvd_ring_parse_cs,
	.emit_ib = uvd_v6_0_ring_emit_ib,
	.emit_fence = uvd_v6_0_ring_emit_fence,
	.emit_semaphore = uvd_v6_0_ring_emit_semaphore,
	.test_ring = uvd_v6_0_ring_test_ring,
	.test_ib = uvd_v6_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
};

static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->uvd.ring.funcs = &uvd_v6_0_ring_funcs;
}

static const struct amdgpu_irq_src_funcs uvd_v6_0_irq_funcs = {
	.set = uvd_v6_0_set_interrupt_state,
	.process = uvd_v6_0_process_interrupt,
};

static void uvd_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->uvd.irq.num_types = 1;
	adev->uvd.irq.funcs = &uvd_v6_0_irq_funcs;
}