/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_vce.h"
#include "vid.h"
#include "vce/vce_3_0_d.h"
#include "vce/vce_3_0_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
/* Forward declarations for helpers defined later in this file. */
static void vce_v3_0_mc_resume(struct amdgpu_device *adev);
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
43 * vce_v3_0_ring_get_rptr - get read pointer
45 * @ring: amdgpu_ring pointer
47 * Returns the current hardware read pointer
49 static uint32_t vce_v3_0_ring_get_rptr(struct amdgpu_ring
*ring
)
51 struct amdgpu_device
*adev
= ring
->adev
;
53 if (ring
== &adev
->vce
.ring
[0])
54 return RREG32(mmVCE_RB_RPTR
);
56 return RREG32(mmVCE_RB_RPTR2
);
60 * vce_v3_0_ring_get_wptr - get write pointer
62 * @ring: amdgpu_ring pointer
64 * Returns the current hardware write pointer
66 static uint32_t vce_v3_0_ring_get_wptr(struct amdgpu_ring
*ring
)
68 struct amdgpu_device
*adev
= ring
->adev
;
70 if (ring
== &adev
->vce
.ring
[0])
71 return RREG32(mmVCE_RB_WPTR
);
73 return RREG32(mmVCE_RB_WPTR2
);
77 * vce_v3_0_ring_set_wptr - set write pointer
79 * @ring: amdgpu_ring pointer
81 * Commits the write pointer to the hardware
83 static void vce_v3_0_ring_set_wptr(struct amdgpu_ring
*ring
)
85 struct amdgpu_device
*adev
= ring
->adev
;
87 if (ring
== &adev
->vce
.ring
[0])
88 WREG32(mmVCE_RB_WPTR
, ring
->wptr
);
90 WREG32(mmVCE_RB_WPTR2
, ring
->wptr
);
94 * vce_v3_0_start - start VCE block
96 * @adev: amdgpu_device pointer
98 * Setup and start the VCE block
100 static int vce_v3_0_start(struct amdgpu_device
*adev
)
102 struct amdgpu_ring
*ring
;
105 vce_v3_0_mc_resume(adev
);
108 WREG32_P(mmVCE_STATUS
, 1, ~1);
110 ring
= &adev
->vce
.ring
[0];
111 WREG32(mmVCE_RB_RPTR
, ring
->wptr
);
112 WREG32(mmVCE_RB_WPTR
, ring
->wptr
);
113 WREG32(mmVCE_RB_BASE_LO
, ring
->gpu_addr
);
114 WREG32(mmVCE_RB_BASE_HI
, upper_32_bits(ring
->gpu_addr
));
115 WREG32(mmVCE_RB_SIZE
, ring
->ring_size
/ 4);
117 ring
= &adev
->vce
.ring
[1];
118 WREG32(mmVCE_RB_RPTR2
, ring
->wptr
);
119 WREG32(mmVCE_RB_WPTR2
, ring
->wptr
);
120 WREG32(mmVCE_RB_BASE_LO2
, ring
->gpu_addr
);
121 WREG32(mmVCE_RB_BASE_HI2
, upper_32_bits(ring
->gpu_addr
));
122 WREG32(mmVCE_RB_SIZE2
, ring
->ring_size
/ 4);
124 WREG32_P(mmVCE_VCPU_CNTL
, VCE_VCPU_CNTL__CLK_EN_MASK
, ~VCE_VCPU_CNTL__CLK_EN_MASK
);
126 WREG32_P(mmVCE_SOFT_RESET
,
127 VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK
,
128 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK
);
132 WREG32_P(mmVCE_SOFT_RESET
, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK
);
134 for (i
= 0; i
< 10; ++i
) {
136 for (j
= 0; j
< 100; ++j
) {
137 status
= RREG32(mmVCE_STATUS
);
146 DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
147 WREG32_P(mmVCE_SOFT_RESET
, VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK
,
148 ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK
);
150 WREG32_P(mmVCE_SOFT_RESET
, 0, ~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK
);
155 /* clear BUSY flag */
156 WREG32_P(mmVCE_STATUS
, 0, ~1);
159 DRM_ERROR("VCE not responding, giving up!!!\n");
/*
 * vce_v3_0_early_init - install the ring and irq function tables.
 * Always succeeds.
 */
static int vce_v3_0_early_init(struct amdgpu_device *adev)
{
	vce_v3_0_set_ring_funcs(adev);
	vce_v3_0_set_irq_funcs(adev);

	return 0;
}
174 static int vce_v3_0_sw_init(struct amdgpu_device
*adev
)
176 struct amdgpu_ring
*ring
;
180 r
= amdgpu_irq_add_id(adev
, 167, &adev
->vce
.irq
);
184 r
= amdgpu_vce_sw_init(adev
);
188 r
= amdgpu_vce_resume(adev
);
192 ring
= &adev
->vce
.ring
[0];
193 sprintf(ring
->name
, "vce0");
194 r
= amdgpu_ring_init(adev
, ring
, 4096, VCE_CMD_NO_OP
, 0xf,
195 &adev
->vce
.irq
, 0, AMDGPU_RING_TYPE_VCE
);
199 ring
= &adev
->vce
.ring
[1];
200 sprintf(ring
->name
, "vce1");
201 r
= amdgpu_ring_init(adev
, ring
, 4096, VCE_CMD_NO_OP
, 0xf,
202 &adev
->vce
.irq
, 0, AMDGPU_RING_TYPE_VCE
);
/*
 * vce_v3_0_sw_fini - software-side teardown: suspend the VCE helper
 * state, then free the software structures.
 */
static int vce_v3_0_sw_fini(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_vce_suspend(adev);
	if (r)
		return r;

	r = amdgpu_vce_sw_fini(adev);
	if (r)
		return r;

	return r;
}
224 static int vce_v3_0_hw_init(struct amdgpu_device
*adev
)
226 struct amdgpu_ring
*ring
;
229 r
= vce_v3_0_start(adev
);
233 ring
= &adev
->vce
.ring
[0];
235 r
= amdgpu_ring_test_ring(ring
);
241 ring
= &adev
->vce
.ring
[1];
243 r
= amdgpu_ring_test_ring(ring
);
249 DRM_INFO("VCE initialized successfully.\n");
/*
 * vce_v3_0_hw_fini - hardware teardown; nothing to do for VCE 3.0
 * (the block is stopped via power gating elsewhere).
 */
static int vce_v3_0_hw_fini(struct amdgpu_device *adev)
{
	return 0;
}
/*
 * vce_v3_0_suspend - stop the hardware, then suspend the VCE helper
 * state (handles, firmware bo).
 */
static int vce_v3_0_suspend(struct amdgpu_device *adev)
{
	int r;

	r = vce_v3_0_hw_fini(adev);
	if (r)
		return r;

	r = amdgpu_vce_suspend(adev);
	if (r)
		return r;

	return r;
}
/*
 * vce_v3_0_resume - restore the VCE helper state, then re-init the
 * hardware (mirror of vce_v3_0_suspend).
 */
static int vce_v3_0_resume(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	r = vce_v3_0_hw_init(adev);
	if (r)
		return r;

	return r;
}
290 static void vce_v3_0_mc_resume(struct amdgpu_device
*adev
)
292 uint32_t offset
, size
;
294 WREG32_P(mmVCE_CLOCK_GATING_A
, 0, ~(1 << 16));
295 WREG32_P(mmVCE_UENC_CLOCK_GATING
, 0x1FF000, ~0xFF9FF000);
296 WREG32_P(mmVCE_UENC_REG_CLOCK_GATING
, 0x3F, ~0x3F);
297 WREG32(mmVCE_CLOCK_GATING_B
, 0xf7);
299 WREG32(mmVCE_LMI_CTRL
, 0x00398000);
300 WREG32_P(mmVCE_LMI_CACHE_CTRL
, 0x0, ~0x1);
301 WREG32(mmVCE_LMI_SWAP_CNTL
, 0);
302 WREG32(mmVCE_LMI_SWAP_CNTL1
, 0);
303 WREG32(mmVCE_LMI_VM_CTRL
, 0);
305 WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR
, (adev
->vce
.gpu_addr
>> 8));
306 offset
= AMDGPU_VCE_FIRMWARE_OFFSET
;
307 size
= AMDGPU_GPU_PAGE_ALIGN(adev
->vce
.fw
->size
);
308 WREG32(mmVCE_VCPU_CACHE_OFFSET0
, offset
& 0x7fffffff);
309 WREG32(mmVCE_VCPU_CACHE_SIZE0
, size
);
312 size
= AMDGPU_VCE_STACK_SIZE
;
313 WREG32(mmVCE_VCPU_CACHE_OFFSET1
, offset
& 0x7fffffff);
314 WREG32(mmVCE_VCPU_CACHE_SIZE1
, size
);
317 size
= AMDGPU_VCE_HEAP_SIZE
;
318 WREG32(mmVCE_VCPU_CACHE_OFFSET2
, offset
& 0x7fffffff);
319 WREG32(mmVCE_VCPU_CACHE_SIZE2
, size
);
321 WREG32_P(mmVCE_LMI_CTRL2
, 0x0, ~0x100);
323 WREG32_P(mmVCE_SYS_INT_EN
, VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK
,
324 ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK
);
327 static bool vce_v3_0_is_idle(struct amdgpu_device
*adev
)
329 return !(RREG32(mmSRBM_STATUS2
) & SRBM_STATUS2__VCE_BUSY_MASK
);
332 static int vce_v3_0_wait_for_idle(struct amdgpu_device
*adev
)
336 for (i
= 0; i
< adev
->usec_timeout
; i
++) {
337 if (!(RREG32(mmSRBM_STATUS2
) & SRBM_STATUS2__VCE_BUSY_MASK
))
343 static int vce_v3_0_soft_reset(struct amdgpu_device
*adev
)
345 WREG32_P(mmSRBM_SOFT_RESET
, SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK
,
346 ~SRBM_SOFT_RESET__SOFT_RESET_VCE_MASK
);
349 return vce_v3_0_start(adev
);
352 static void vce_v3_0_print_status(struct amdgpu_device
*adev
)
354 dev_info(adev
->dev
, "VCE 3.0 registers\n");
355 dev_info(adev
->dev
, " VCE_STATUS=0x%08X\n",
356 RREG32(mmVCE_STATUS
));
357 dev_info(adev
->dev
, " VCE_VCPU_CNTL=0x%08X\n",
358 RREG32(mmVCE_VCPU_CNTL
));
359 dev_info(adev
->dev
, " VCE_VCPU_CACHE_OFFSET0=0x%08X\n",
360 RREG32(mmVCE_VCPU_CACHE_OFFSET0
));
361 dev_info(adev
->dev
, " VCE_VCPU_CACHE_SIZE0=0x%08X\n",
362 RREG32(mmVCE_VCPU_CACHE_SIZE0
));
363 dev_info(adev
->dev
, " VCE_VCPU_CACHE_OFFSET1=0x%08X\n",
364 RREG32(mmVCE_VCPU_CACHE_OFFSET1
));
365 dev_info(adev
->dev
, " VCE_VCPU_CACHE_SIZE1=0x%08X\n",
366 RREG32(mmVCE_VCPU_CACHE_SIZE1
));
367 dev_info(adev
->dev
, " VCE_VCPU_CACHE_OFFSET2=0x%08X\n",
368 RREG32(mmVCE_VCPU_CACHE_OFFSET2
));
369 dev_info(adev
->dev
, " VCE_VCPU_CACHE_SIZE2=0x%08X\n",
370 RREG32(mmVCE_VCPU_CACHE_SIZE2
));
371 dev_info(adev
->dev
, " VCE_SOFT_RESET=0x%08X\n",
372 RREG32(mmVCE_SOFT_RESET
));
373 dev_info(adev
->dev
, " VCE_RB_BASE_LO2=0x%08X\n",
374 RREG32(mmVCE_RB_BASE_LO2
));
375 dev_info(adev
->dev
, " VCE_RB_BASE_HI2=0x%08X\n",
376 RREG32(mmVCE_RB_BASE_HI2
));
377 dev_info(adev
->dev
, " VCE_RB_SIZE2=0x%08X\n",
378 RREG32(mmVCE_RB_SIZE2
));
379 dev_info(adev
->dev
, " VCE_RB_RPTR2=0x%08X\n",
380 RREG32(mmVCE_RB_RPTR2
));
381 dev_info(adev
->dev
, " VCE_RB_WPTR2=0x%08X\n",
382 RREG32(mmVCE_RB_WPTR2
));
383 dev_info(adev
->dev
, " VCE_RB_BASE_LO=0x%08X\n",
384 RREG32(mmVCE_RB_BASE_LO
));
385 dev_info(adev
->dev
, " VCE_RB_BASE_HI=0x%08X\n",
386 RREG32(mmVCE_RB_BASE_HI
));
387 dev_info(adev
->dev
, " VCE_RB_SIZE=0x%08X\n",
388 RREG32(mmVCE_RB_SIZE
));
389 dev_info(adev
->dev
, " VCE_RB_RPTR=0x%08X\n",
390 RREG32(mmVCE_RB_RPTR
));
391 dev_info(adev
->dev
, " VCE_RB_WPTR=0x%08X\n",
392 RREG32(mmVCE_RB_WPTR
));
393 dev_info(adev
->dev
, " VCE_CLOCK_GATING_A=0x%08X\n",
394 RREG32(mmVCE_CLOCK_GATING_A
));
395 dev_info(adev
->dev
, " VCE_CLOCK_GATING_B=0x%08X\n",
396 RREG32(mmVCE_CLOCK_GATING_B
));
397 dev_info(adev
->dev
, " VCE_UENC_CLOCK_GATING=0x%08X\n",
398 RREG32(mmVCE_UENC_CLOCK_GATING
));
399 dev_info(adev
->dev
, " VCE_UENC_REG_CLOCK_GATING=0x%08X\n",
400 RREG32(mmVCE_UENC_REG_CLOCK_GATING
));
401 dev_info(adev
->dev
, " VCE_SYS_INT_EN=0x%08X\n",
402 RREG32(mmVCE_SYS_INT_EN
));
403 dev_info(adev
->dev
, " VCE_LMI_CTRL2=0x%08X\n",
404 RREG32(mmVCE_LMI_CTRL2
));
405 dev_info(adev
->dev
, " VCE_LMI_CTRL=0x%08X\n",
406 RREG32(mmVCE_LMI_CTRL
));
407 dev_info(adev
->dev
, " VCE_LMI_VM_CTRL=0x%08X\n",
408 RREG32(mmVCE_LMI_VM_CTRL
));
409 dev_info(adev
->dev
, " VCE_LMI_SWAP_CNTL=0x%08X\n",
410 RREG32(mmVCE_LMI_SWAP_CNTL
));
411 dev_info(adev
->dev
, " VCE_LMI_SWAP_CNTL1=0x%08X\n",
412 RREG32(mmVCE_LMI_SWAP_CNTL1
));
413 dev_info(adev
->dev
, " VCE_LMI_CACHE_CTRL=0x%08X\n",
414 RREG32(mmVCE_LMI_CACHE_CTRL
));
417 static int vce_v3_0_set_interrupt_state(struct amdgpu_device
*adev
,
418 struct amdgpu_irq_src
*source
,
420 enum amdgpu_interrupt_state state
)
424 if (state
== AMDGPU_IRQ_STATE_ENABLE
)
425 val
|= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK
;
427 WREG32_P(mmVCE_SYS_INT_EN
, val
, ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK
);
431 static int vce_v3_0_process_interrupt(struct amdgpu_device
*adev
,
432 struct amdgpu_irq_src
*source
,
433 struct amdgpu_iv_entry
*entry
)
435 DRM_DEBUG("IH: VCE\n");
436 switch (entry
->src_data
) {
438 amdgpu_fence_process(&adev
->vce
.ring
[0]);
441 amdgpu_fence_process(&adev
->vce
.ring
[1]);
444 DRM_ERROR("Unhandled interrupt: %d %d\n",
445 entry
->src_id
, entry
->src_data
);
452 static int vce_v3_0_set_clockgating_state(struct amdgpu_device
*adev
,
453 enum amdgpu_clockgating_state state
)
459 static int vce_v3_0_set_powergating_state(struct amdgpu_device
*adev
,
460 enum amdgpu_powergating_state state
)
462 /* This doesn't actually powergate the VCE block.
463 * That's done in the dpm code via the SMC. This
464 * just re-inits the block as necessary. The actual
465 * gating still happens in the dpm code. We should
466 * revisit this when there is a cleaner line between
467 * the smc and the hw blocks
469 if (state
== AMDGPU_PG_STATE_GATE
)
470 /* XXX do we need a vce_v3_0_stop()? */
473 return vce_v3_0_start(adev
);
476 const struct amdgpu_ip_funcs vce_v3_0_ip_funcs
= {
477 .early_init
= vce_v3_0_early_init
,
479 .sw_init
= vce_v3_0_sw_init
,
480 .sw_fini
= vce_v3_0_sw_fini
,
481 .hw_init
= vce_v3_0_hw_init
,
482 .hw_fini
= vce_v3_0_hw_fini
,
483 .suspend
= vce_v3_0_suspend
,
484 .resume
= vce_v3_0_resume
,
485 .is_idle
= vce_v3_0_is_idle
,
486 .wait_for_idle
= vce_v3_0_wait_for_idle
,
487 .soft_reset
= vce_v3_0_soft_reset
,
488 .print_status
= vce_v3_0_print_status
,
489 .set_clockgating_state
= vce_v3_0_set_clockgating_state
,
490 .set_powergating_state
= vce_v3_0_set_powergating_state
,
493 static const struct amdgpu_ring_funcs vce_v3_0_ring_funcs
= {
494 .get_rptr
= vce_v3_0_ring_get_rptr
,
495 .get_wptr
= vce_v3_0_ring_get_wptr
,
496 .set_wptr
= vce_v3_0_ring_set_wptr
,
497 .parse_cs
= amdgpu_vce_ring_parse_cs
,
498 .emit_ib
= amdgpu_vce_ring_emit_ib
,
499 .emit_fence
= amdgpu_vce_ring_emit_fence
,
500 .emit_semaphore
= amdgpu_vce_ring_emit_semaphore
,
501 .test_ring
= amdgpu_vce_ring_test_ring
,
502 .test_ib
= amdgpu_vce_ring_test_ib
,
503 .is_lockup
= amdgpu_ring_test_lockup
,
506 static void vce_v3_0_set_ring_funcs(struct amdgpu_device
*adev
)
508 adev
->vce
.ring
[0].funcs
= &vce_v3_0_ring_funcs
;
509 adev
->vce
.ring
[1].funcs
= &vce_v3_0_ring_funcs
;
512 static const struct amdgpu_irq_src_funcs vce_v3_0_irq_funcs
= {
513 .set
= vce_v3_0_set_interrupt_state
,
514 .process
= vce_v3_0_process_interrupt
,
517 static void vce_v3_0_set_irq_funcs(struct amdgpu_device
*adev
)
519 adev
->vce
.irq
.num_types
= 1;
520 adev
->vce
.irq
.funcs
= &vce_v3_0_irq_funcs
;