/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_vce.h"
#include "cikd.h"

#include "vce/vce_2_0_d.h"
#include "vce/vce_2_0_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#define VCE_V2_0_FW_SIZE	(256 * 1024)
#define VCE_V2_0_STACK_SIZE	(64 * 1024)
#define VCE_V2_0_DATA_SIZE	(23552 * AMDGPU_MAX_VCE_HANDLES)
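
/* The firmware image, stack and per-handle data segments live back to back in
 * the VCE BO; the sizes above are what vce_v2_0_mc_resume() programs into the
 * VCPU cache offset/size registers. */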
static void vce_v2_0_mc_resume(struct amdgpu_device *adev);
static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev);
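
/* VCE 2.0 provides two rings: ring 0 is driven through the VCE_RB_* registers
 * and ring 1 through their VCE_RB_*2 counterparts, so the ring helpers below
 * check which ring they were handed before touching the hardware. */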
/**
 * vce_v2_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint32_t vce_v2_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vce.ring[0])
		return RREG32(mmVCE_RB_RPTR);
	else
		return RREG32(mmVCE_RB_RPTR2);
}
/**
 * vce_v2_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint32_t vce_v2_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vce.ring[0])
		return RREG32(mmVCE_RB_WPTR);
	else
		return RREG32(mmVCE_RB_WPTR2);
}
/**
 * vce_v2_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vce_v2_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->vce.ring[0])
		WREG32(mmVCE_RB_WPTR, ring->wptr);
	else
		WREG32(mmVCE_RB_WPTR2, ring->wptr);
}
/**
 * vce_v2_0_start - start VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCE block
 */
static int vce_v2_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int i, j, r;

	vce_v2_0_mc_resume(adev);

	/* set BUSY flag */
	WREG32_P(mmVCE_STATUS, 1, ~1);

	ring = &adev->vce.ring[0];
	WREG32(mmVCE_RB_RPTR, ring->wptr);
	WREG32(mmVCE_RB_WPTR, ring->wptr);
	WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
	WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);

	ring = &adev->vce.ring[1];
	WREG32(mmVCE_RB_RPTR2, ring->wptr);
	WREG32(mmVCE_RB_WPTR2, ring->wptr);
	WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
	WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
	WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);

	WREG32_P(mmVCE_VCPU_CNTL, VCE_VCPU_CNTL__CLK_EN_MASK,
		 ~VCE_VCPU_CNTL__CLK_EN_MASK);

	WREG32_P(mmVCE_SOFT_RESET,
		 VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
		 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);

	mdelay(100);

	WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
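
	/* firmware boot handshake: with the ECPU out of reset, poll VCE_STATUS
	 * until the firmware reports that it is running; if it does not come
	 * up, toggle the ECPU soft reset and retry before giving up. */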
	for (i = 0; i < 10; ++i) {
		uint32_t status;

		for (j = 0; j < 100; ++j) {
			status = RREG32(mmVCE_STATUS);
			if (status & 2)
				break;
			mdelay(10);
		}
		r = 0;
		if (status & 2)
			break;

		DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
		WREG32_P(mmVCE_SOFT_RESET, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
			 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(mmVCE_SOFT_RESET, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
		mdelay(10);
		r = -1;
	}

	/* clear BUSY flag */
	WREG32_P(mmVCE_STATUS, 0, ~1);

	if (r) {
		DRM_ERROR("VCE not responding, giving up!!!\n");
		return r;
	}

	return 0;
}
static int vce_v2_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	vce_v2_0_set_ring_funcs(adev);
	vce_v2_0_set_irq_funcs(adev);

	return 0;
}
static int vce_v2_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* VCE */
	r = amdgpu_irq_add_id(adev, 167, &adev->vce.irq);
	if (r)
		return r;

	r = amdgpu_vce_sw_init(adev, VCE_V2_0_FW_SIZE +
		VCE_V2_0_STACK_SIZE + VCE_V2_0_DATA_SIZE);
	if (r)
		return r;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	ring = &adev->vce.ring[0];
	sprintf(ring->name, "vce0");
	r = amdgpu_ring_init(adev, ring, 4096, VCE_CMD_NO_OP, 0xf,
			     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
	if (r)
		return r;

	ring = &adev->vce.ring[1];
	sprintf(ring->name, "vce1");
	r = amdgpu_ring_init(adev, ring, 4096, VCE_CMD_NO_OP, 0xf,
			     &adev->vce.irq, 0, AMDGPU_RING_TYPE_VCE);
	if (r)
		return r;

	return 0;
}
static int vce_v2_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vce_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vce_sw_fini(adev);
	if (r)
		return r;

	return 0;
}
static int vce_v2_0_hw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vce_v2_0_start(adev);
	if (r)
		return r;

	ring = &adev->vce.ring[0];
	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		return r;
	}

	ring = &adev->vce.ring[1];
	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		return r;
	}

	DRM_INFO("VCE initialized successfully.\n");

	return 0;
}
static int vce_v2_0_hw_fini(void *handle)
{
	return 0;
}
static int vce_v2_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = vce_v2_0_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vce_suspend(adev);
	if (r)
		return r;

	return 0;
}
static int vce_v2_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	r = vce_v2_0_hw_init(adev);
	if (r)
		return r;

	return 0;
}
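
/* Medium grain clock gating: the gate bits can either be driven by the driver
 * (vce_v2_0_set_sw_cg) or left to the hardware's dynamic gating
 * (vce_v2_0_set_dyn_cg); vce_v2_0_enable_mgcg() selects between the two. */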
static void vce_v2_0_set_sw_cg(struct amdgpu_device *adev, bool gated)
{
	u32 tmp;

	if (gated) {
		tmp = RREG32(mmVCE_CLOCK_GATING_B);
		tmp |= 0xe70000;
		WREG32(mmVCE_CLOCK_GATING_B, tmp);

		tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
		tmp |= 0xff000000;
		WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

		tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
		tmp &= ~0x3fc;
		WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);

		WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
	} else {
		tmp = RREG32(mmVCE_CLOCK_GATING_B);
		tmp |= 0xe7;
		tmp &= ~0xe70000;
		WREG32(mmVCE_CLOCK_GATING_B, tmp);

		tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
		tmp |= 0x1fe000;
		tmp &= ~0xff000000;
		WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

		tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
		tmp |= 0x3fc;
		WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
	}
}
static void vce_v2_0_set_dyn_cg(struct amdgpu_device *adev, bool gated)
{
	u32 orig, tmp;

	tmp = RREG32(mmVCE_CLOCK_GATING_B);
	tmp &= ~0x00060006;
	if (gated) {
		tmp |= 0xe10000;
	} else {
		tmp |= 0xe1;
		tmp &= ~0xe10000;
	}
	WREG32(mmVCE_CLOCK_GATING_B, tmp);

	orig = tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
	tmp &= ~0x1fe000;
	tmp &= ~0xff000000;
	if (tmp != orig)
		WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

	orig = tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
	tmp &= ~0x3fc;
	if (tmp != orig)
		WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);

	if (gated)
		WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
}
static void vce_v2_0_disable_cg(struct amdgpu_device *adev)
{
	WREG32(mmVCE_CGTT_CLK_OVERRIDE, 7);
}
static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
{
	bool sw_cg = false;

	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_VCE_MGCG)) {
		if (sw_cg)
			vce_v2_0_set_sw_cg(adev, true);
		else
			vce_v2_0_set_dyn_cg(adev, true);
	} else {
		vce_v2_0_disable_cg(adev);

		if (sw_cg)
			vce_v2_0_set_sw_cg(adev, false);
		else
			vce_v2_0_set_dyn_cg(adev, false);
	}
}
static void vce_v2_0_init_cg(struct amdgpu_device *adev)
{
	u32 tmp;

	tmp = RREG32(mmVCE_CLOCK_GATING_A);
	tmp &= ~0xfff;
	tmp |= ((0 << 0) | (4 << 4));
	tmp |= 0x40000;
	WREG32(mmVCE_CLOCK_GATING_A, tmp);

	tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
	tmp &= ~0xfff;
	tmp |= ((0 << 0) | (4 << 4));
	WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

	tmp = RREG32(mmVCE_CLOCK_GATING_B);
	tmp |= 0x10;
	tmp &= ~0x100000;
	WREG32(mmVCE_CLOCK_GATING_B, tmp);
}
static void vce_v2_0_mc_resume(struct amdgpu_device *adev)
{
	uint64_t addr = adev->vce.gpu_addr;
	uint32_t size;

	WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
	WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
	WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
	WREG32(mmVCE_CLOCK_GATING_B, 0xf7);

	WREG32(mmVCE_LMI_CTRL, 0x00398000);
	WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
	WREG32(mmVCE_LMI_SWAP_CNTL, 0);
	WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
	WREG32(mmVCE_LMI_VM_CTRL, 0);

	addr += AMDGPU_VCE_FIRMWARE_OFFSET;
	size = VCE_V2_0_FW_SIZE;
	WREG32(mmVCE_VCPU_CACHE_OFFSET0, addr & 0x7fffffff);
	WREG32(mmVCE_VCPU_CACHE_SIZE0, size);

	addr += size;
	size = VCE_V2_0_STACK_SIZE;
	WREG32(mmVCE_VCPU_CACHE_OFFSET1, addr & 0x7fffffff);
	WREG32(mmVCE_VCPU_CACHE_SIZE1, size);

	addr += size;
	size = VCE_V2_0_DATA_SIZE;
	WREG32(mmVCE_VCPU_CACHE_OFFSET2, addr & 0x7fffffff);
	WREG32(mmVCE_VCPU_CACHE_SIZE2, size);

	WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);

	WREG32_P(mmVCE_SYS_INT_EN, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK,
		 ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);

	vce_v2_0_init_cg(adev);
}
static bool vce_v2_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK);
}
static int vce_v2_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (!(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK))
			return 0;
	}
	return -ETIMEDOUT;
}
static int vce_v2_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK,
		 ~SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK);
	mdelay(5);

	return vce_v2_0_start(adev);
}
static void vce_v2_0_print_status(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	dev_info(adev->dev, "VCE 2.0 registers\n");
	dev_info(adev->dev, "  VCE_STATUS=0x%08X\n",
		 RREG32(mmVCE_STATUS));
	dev_info(adev->dev, "  VCE_VCPU_CNTL=0x%08X\n",
		 RREG32(mmVCE_VCPU_CNTL));
	dev_info(adev->dev, "  VCE_VCPU_CACHE_OFFSET0=0x%08X\n",
		 RREG32(mmVCE_VCPU_CACHE_OFFSET0));
	dev_info(adev->dev, "  VCE_VCPU_CACHE_SIZE0=0x%08X\n",
		 RREG32(mmVCE_VCPU_CACHE_SIZE0));
	dev_info(adev->dev, "  VCE_VCPU_CACHE_OFFSET1=0x%08X\n",
		 RREG32(mmVCE_VCPU_CACHE_OFFSET1));
	dev_info(adev->dev, "  VCE_VCPU_CACHE_SIZE1=0x%08X\n",
		 RREG32(mmVCE_VCPU_CACHE_SIZE1));
	dev_info(adev->dev, "  VCE_VCPU_CACHE_OFFSET2=0x%08X\n",
		 RREG32(mmVCE_VCPU_CACHE_OFFSET2));
	dev_info(adev->dev, "  VCE_VCPU_CACHE_SIZE2=0x%08X\n",
		 RREG32(mmVCE_VCPU_CACHE_SIZE2));
	dev_info(adev->dev, "  VCE_SOFT_RESET=0x%08X\n",
		 RREG32(mmVCE_SOFT_RESET));
	dev_info(adev->dev, "  VCE_RB_BASE_LO2=0x%08X\n",
		 RREG32(mmVCE_RB_BASE_LO2));
	dev_info(adev->dev, "  VCE_RB_BASE_HI2=0x%08X\n",
		 RREG32(mmVCE_RB_BASE_HI2));
	dev_info(adev->dev, "  VCE_RB_SIZE2=0x%08X\n",
		 RREG32(mmVCE_RB_SIZE2));
	dev_info(adev->dev, "  VCE_RB_RPTR2=0x%08X\n",
		 RREG32(mmVCE_RB_RPTR2));
	dev_info(adev->dev, "  VCE_RB_WPTR2=0x%08X\n",
		 RREG32(mmVCE_RB_WPTR2));
	dev_info(adev->dev, "  VCE_RB_BASE_LO=0x%08X\n",
		 RREG32(mmVCE_RB_BASE_LO));
	dev_info(adev->dev, "  VCE_RB_BASE_HI=0x%08X\n",
		 RREG32(mmVCE_RB_BASE_HI));
	dev_info(adev->dev, "  VCE_RB_SIZE=0x%08X\n",
		 RREG32(mmVCE_RB_SIZE));
	dev_info(adev->dev, "  VCE_RB_RPTR=0x%08X\n",
		 RREG32(mmVCE_RB_RPTR));
	dev_info(adev->dev, "  VCE_RB_WPTR=0x%08X\n",
		 RREG32(mmVCE_RB_WPTR));
	dev_info(adev->dev, "  VCE_CLOCK_GATING_A=0x%08X\n",
		 RREG32(mmVCE_CLOCK_GATING_A));
	dev_info(adev->dev, "  VCE_CLOCK_GATING_B=0x%08X\n",
		 RREG32(mmVCE_CLOCK_GATING_B));
	dev_info(adev->dev, "  VCE_CGTT_CLK_OVERRIDE=0x%08X\n",
		 RREG32(mmVCE_CGTT_CLK_OVERRIDE));
	dev_info(adev->dev, "  VCE_UENC_CLOCK_GATING=0x%08X\n",
		 RREG32(mmVCE_UENC_CLOCK_GATING));
	dev_info(adev->dev, "  VCE_UENC_REG_CLOCK_GATING=0x%08X\n",
		 RREG32(mmVCE_UENC_REG_CLOCK_GATING));
	dev_info(adev->dev, "  VCE_SYS_INT_EN=0x%08X\n",
		 RREG32(mmVCE_SYS_INT_EN));
	dev_info(adev->dev, "  VCE_LMI_CTRL2=0x%08X\n",
		 RREG32(mmVCE_LMI_CTRL2));
	dev_info(adev->dev, "  VCE_LMI_CTRL=0x%08X\n",
		 RREG32(mmVCE_LMI_CTRL));
	dev_info(adev->dev, "  VCE_LMI_VM_CTRL=0x%08X\n",
		 RREG32(mmVCE_LMI_VM_CTRL));
	dev_info(adev->dev, "  VCE_LMI_SWAP_CNTL=0x%08X\n",
		 RREG32(mmVCE_LMI_SWAP_CNTL));
	dev_info(adev->dev, "  VCE_LMI_SWAP_CNTL1=0x%08X\n",
		 RREG32(mmVCE_LMI_SWAP_CNTL1));
	dev_info(adev->dev, "  VCE_LMI_CACHE_CTRL=0x%08X\n",
		 RREG32(mmVCE_LMI_CACHE_CTRL));
}
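
/* VCE interrupt handling: a single interrupt source (src id 167, registered
 * in vce_v2_0_sw_init) is armed via VCE_SYS_INT_EN; the IV entry's src_data
 * selects which ring's fences to process. */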
static int vce_v2_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	uint32_t val = 0;

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;

	WREG32_P(mmVCE_SYS_INT_EN, val,
		 ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
	return 0;
}
static int vce_v2_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCE\n");
	switch (entry->src_data) {
	case 0:
		amdgpu_fence_process(&adev->vce.ring[0]);
		break;
	case 1:
		amdgpu_fence_process(&adev->vce.ring[1]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data);
		break;
	}

	return 0;
}
static int vce_v2_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	bool gate = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_CG_STATE_GATE)
		gate = true;

	vce_v2_0_enable_mgcg(adev, gate);

	return 0;
}
static int vce_v2_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the VCE block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_PG_STATE_GATE)
		/* XXX do we need a vce_v2_0_stop()? */
		return 0;
	else
		return vce_v2_0_start(adev);
}
const struct amd_ip_funcs vce_v2_0_ip_funcs = {
	.early_init = vce_v2_0_early_init,
	.sw_init = vce_v2_0_sw_init,
	.sw_fini = vce_v2_0_sw_fini,
	.hw_init = vce_v2_0_hw_init,
	.hw_fini = vce_v2_0_hw_fini,
	.suspend = vce_v2_0_suspend,
	.resume = vce_v2_0_resume,
	.is_idle = vce_v2_0_is_idle,
	.wait_for_idle = vce_v2_0_wait_for_idle,
	.soft_reset = vce_v2_0_soft_reset,
	.print_status = vce_v2_0_print_status,
	.set_clockgating_state = vce_v2_0_set_clockgating_state,
	.set_powergating_state = vce_v2_0_set_powergating_state,
};
static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
	.get_rptr = vce_v2_0_ring_get_rptr,
	.get_wptr = vce_v2_0_ring_get_wptr,
	.set_wptr = vce_v2_0_ring_set_wptr,
	.parse_cs = amdgpu_vce_ring_parse_cs,
	.emit_ib = amdgpu_vce_ring_emit_ib,
	.emit_fence = amdgpu_vce_ring_emit_fence,
	.emit_semaphore = amdgpu_vce_ring_emit_semaphore,
	.test_ring = amdgpu_vce_ring_test_ring,
	.test_ib = amdgpu_vce_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
};
static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
{
	adev->vce.ring[0].funcs = &vce_v2_0_ring_funcs;
	adev->vce.ring[1].funcs = &vce_v2_0_ring_funcs;
}
static const struct amdgpu_irq_src_funcs vce_v2_0_irq_funcs = {
	.set = vce_v2_0_set_interrupt_state,
	.process = vce_v2_0_process_interrupt,
};
static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->vce.irq.num_types = 1;
	adev->vce.irq.funcs = &vce_v2_0_irq_funcs;
}