/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_ucode.h"
#include "amdgpu_trace.h"
#include "vi.h"
#include "vid.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "gca/gfx_8_0_d.h"
#include "gca/gfx_8_0_enum.h"
#include "gca/gfx_8_0_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "tonga_sdma_pkt_open.h"
static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev);
static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev);
static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev);
MODULE_FIRMWARE("amdgpu/tonga_sdma.bin");
MODULE_FIRMWARE("amdgpu/tonga_sdma1.bin");
MODULE_FIRMWARE("amdgpu/carrizo_sdma.bin");
MODULE_FIRMWARE("amdgpu/carrizo_sdma1.bin");
MODULE_FIRMWARE("amdgpu/fiji_sdma.bin");
MODULE_FIRMWARE("amdgpu/fiji_sdma1.bin");
MODULE_FIRMWARE("amdgpu/stoney_sdma.bin");
static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
{
	SDMA0_REGISTER_OFFSET,
	SDMA1_REGISTER_OFFSET
};
static const u32 golden_settings_tonga_a11[] =
{
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
};

static const u32 tonga_mgcg_cgcg_init[] =
{
	mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
	mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
};
static const u32 golden_settings_fiji_a10[] =
{
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_GFX_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC0_IB_CNTL, 0x800f0111, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x800f0111, 0x00000100,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
	mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
};
static const u32 cz_golden_settings_a11[] =
{
	mmSDMA0_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA0_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA0_GFX_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA0_POWER_CNTL, 0x00000800, 0x0003c800,
	mmSDMA0_RLC0_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA1_CHICKEN_BITS, 0xfc910007, 0x00810007,
	mmSDMA1_CLK_CTRL, 0xff000fff, 0x00000000,
	mmSDMA1_GFX_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA1_POWER_CNTL, 0x00000800, 0x0003c800,
	mmSDMA1_RLC0_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA1_RLC1_IB_CNTL, 0x00000100, 0x00000100,
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmSDMA0_CLK_CTRL, 0xff000ff0, 0x00000100,
	mmSDMA1_CLK_CTRL, 0xff000ff0, 0x00000100
};
static const u32 stoney_golden_settings_a11[] =
{
	mmSDMA0_GFX_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA0_POWER_CNTL, 0x00000800, 0x0003c800,
	mmSDMA0_RLC0_IB_CNTL, 0x00000100, 0x00000100,
	mmSDMA0_RLC1_IB_CNTL, 0x00000100, 0x00000100,
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmSDMA0_CLK_CTRL, 0xffffffff, 0x00000100,
};
/*
 * sDMA - System DMA
 * Starting with CIK, the GPU has new asynchronous
 * DMA engines.  These engines are used for compute
 * and gfx.  There are two DMA engines (SDMA0, SDMA1)
 * and each one supports 1 ring buffer used for gfx
 * and 2 queues used for compute.
 *
 * The programming model is very similar to the CP
 * (ring buffer, IBs, etc.), but sDMA has its own
 * packet format that is different from the PM4 format
 * used by the CP. sDMA supports copying data, writing
 * embedded data, solid fills, and a number of other
 * things.  It also has support for tiling/detiling of
 * buffers.
 */
static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_FIJI:
		amdgpu_program_register_sequence(adev,
						 fiji_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 golden_settings_fiji_a10,
						 (const u32)ARRAY_SIZE(golden_settings_fiji_a10));
		break;
	case CHIP_TONGA:
		amdgpu_program_register_sequence(adev,
						 tonga_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 golden_settings_tonga_a11,
						 (const u32)ARRAY_SIZE(golden_settings_tonga_a11));
		break;
	case CHIP_CARRIZO:
		amdgpu_program_register_sequence(adev,
						 cz_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 cz_golden_settings_a11,
						 (const u32)ARRAY_SIZE(cz_golden_settings_a11));
		break;
	case CHIP_STONEY:
		amdgpu_program_register_sequence(adev,
						 stoney_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 stoney_golden_settings_a11,
						 (const u32)ARRAY_SIZE(stoney_golden_settings_a11));
		break;
	default:
		break;
	}
}
/**
 * sdma_v3_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err = 0, i;
	struct amdgpu_firmware_info *info = NULL;
	const struct common_firmware_header *header = NULL;
	const struct sdma_firmware_header_v1_0 *hdr;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_TONGA:
		chip_name = "tonga";
		break;
	case CHIP_FIJI:
		chip_name = "fiji";
		break;
	case CHIP_CARRIZO:
		chip_name = "carrizo";
		break;
	case CHIP_STONEY:
		chip_name = "stoney";
		break;
	default:
		BUG();
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (i == 0)
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma.bin", chip_name);
		else
			snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_sdma1.bin", chip_name);
		err = request_firmware(&adev->sdma.instance[i].fw, fw_name, adev->dev);
		if (err)
			goto out;
		err = amdgpu_ucode_validate(adev->sdma.instance[i].fw);
		if (err)
			goto out;
		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
		adev->sdma.instance[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
		adev->sdma.instance[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
		if (adev->sdma.instance[i].feature_version >= 20)
			adev->sdma.instance[i].burst_nop = true;

		if (adev->firmware.smu_load) {
			info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
			info->ucode_id = AMDGPU_UCODE_ID_SDMA0 + i;
			info->fw = adev->sdma.instance[i].fw;
			header = (const struct common_firmware_header *)info->fw->data;
			adev->firmware.fw_size +=
				ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
		}
	}
out:
	if (err) {
		printk(KERN_ERR
		       "sdma_v3_0: Failed to load firmware \"%s\"\n",
		       fw_name);
		for (i = 0; i < adev->sdma.num_instances; i++) {
			release_firmware(adev->sdma.instance[i].fw);
			adev->sdma.instance[i].fw = NULL;
		}
	}
	return err;
}
/**
 * sdma_v3_0_ring_get_rptr - get the current read pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current rptr from the hardware (VI+).
 */
static uint32_t sdma_v3_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	u32 rptr;

	/* XXX check if swapping is necessary on BE */
	rptr = ring->adev->wb.wb[ring->rptr_offs] >> 2;

	return rptr;
}
/**
 * sdma_v3_0_ring_get_wptr - get the current write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Get the current wptr from the hardware (VI+).
 */
static uint32_t sdma_v3_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	u32 wptr;

	if (ring->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		wptr = ring->adev->wb.wb[ring->wptr_offs] >> 2;
	} else {
		int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;

		wptr = RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me]) >> 2;
	}

	return wptr;
}
/**
 * sdma_v3_0_ring_set_wptr - commit the write pointer
 *
 * @ring: amdgpu ring pointer
 *
 * Write the wptr back to the hardware (VI+).
 */
static void sdma_v3_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->use_doorbell) {
		/* XXX check if swapping is necessary on BE */
		adev->wb.wb[ring->wptr_offs] = ring->wptr << 2;
		WDOORBELL32(ring->doorbell_index, ring->wptr << 2);
	} else {
		int me = (ring == &ring->adev->sdma.instance[0].ring) ? 0 : 1;

		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[me], ring->wptr << 2);
	}
}
static void sdma_v3_0_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
	int i;

	for (i = 0; i < count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			amdgpu_ring_write(ring, ring->nop |
				SDMA_PKT_NOP_HEADER_COUNT(count - 1));
		else
			amdgpu_ring_write(ring, ring->nop);
}
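
/*
 * Note on the burst NOP path above: when the firmware supports it
 * (feature_version >= 20, see sdma_v3_0_init_microcode), the first dword
 * carries SDMA_PKT_NOP_HEADER_COUNT(count - 1), telling the engine that
 * the next count - 1 plain NOP dwords belong to the same packet.  For
 * example, count = 4 emits NOP|COUNT(3), NOP, NOP, NOP -- still 4 dwords
 * total, but fetched as one packet instead of four.
 */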
/**
 * sdma_v3_0_ring_emit_ib - Schedule an IB on the DMA engine
 *
 * @ring: amdgpu ring pointer
 * @ib: IB object to schedule
 *
 * Schedule an IB in the DMA ring (VI).
 */
static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring,
				   struct amdgpu_ib *ib)
{
	u32 vmid = (ib->vm ? ib->vm->ids[ring->idx].id : 0) & 0xf;
	u32 next_rptr = ring->wptr + 5;

	while ((next_rptr & 7) != 2)
		next_rptr++;

	next_rptr += 6;

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
			  SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
	amdgpu_ring_write(ring, lower_32_bits(ring->next_rptr_gpu_addr) & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr));
	amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
	amdgpu_ring_write(ring, next_rptr);

	/* IB packet must end on a 8 DW boundary */
	sdma_v3_0_ring_insert_nop(ring, (10 - (ring->wptr & 7)) % 8);

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_INDIRECT) |
			  SDMA_PKT_INDIRECT_HEADER_VMID(vmid));
	/* base must be 32 byte aligned */
	amdgpu_ring_write(ring, lower_32_bits(ib->gpu_addr) & 0xffffffe0);
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr));
	amdgpu_ring_write(ring, ib->length_dw);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0);
}
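
/*
 * A worked example of the padding math above: the INDIRECT packet is
 * 6 dwords, so aligning wptr to 2 (mod 8) makes the packet end exactly
 * on an 8-dword boundary.  (10 - (wptr & 7)) % 8 produces that
 * alignment, e.g. wptr & 7 == 4 yields 6 NOPs (4 + 6 + 6 = 16), while
 * wptr & 7 == 2 yields none.
 */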
/**
 * sdma_v3_0_ring_emit_hdp_flush - emit an hdp flush on the DMA ring
 *
 * @ring: amdgpu ring pointer
 *
 * Emit an hdp flush packet on the requested DMA ring.
 */
static void sdma_v3_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	u32 ref_and_mask = 0;

	if (ring == &ring->adev->sdma.instance[0].ring)
		ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA0, 1);
	else
		ref_and_mask = REG_SET_FIELD(ref_and_mask, GPU_HDP_FLUSH_DONE, SDMA1, 1);

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE << 2);
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ << 2);
	amdgpu_ring_write(ring, ref_and_mask); /* reference */
	amdgpu_ring_write(ring, ref_and_mask); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}
/**
 * sdma_v3_0_ring_emit_fence - emit a fence on the DMA ring
 *
 * @ring: amdgpu ring pointer
 * @fence: amdgpu fence object
 *
 * Add a DMA fence packet to the ring to write
 * the fence seq number and DMA trap packet to generate
 * an interrupt if needed (VI).
 */
static void sdma_v3_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
				      unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	/* write the fence */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
	amdgpu_ring_write(ring, lower_32_bits(addr));
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));

	/* optionally write high bits as well */
	if (write64bit) {
		addr += 4;
		amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_FENCE));
		amdgpu_ring_write(ring, lower_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(addr));
		amdgpu_ring_write(ring, upper_32_bits(seq));
	}

	/* generate an interrupt */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_TRAP));
	amdgpu_ring_write(ring, SDMA_PKT_TRAP_INT_CONTEXT_INT_CONTEXT(0));
}
/**
 * sdma_v3_0_gfx_stop - stop the gfx async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the gfx async dma ring buffers (VI).
 */
static void sdma_v3_0_gfx_stop(struct amdgpu_device *adev)
{
	struct amdgpu_ring *sdma0 = &adev->sdma.instance[0].ring;
	struct amdgpu_ring *sdma1 = &adev->sdma.instance[1].ring;
	u32 rb_cntl, ib_cntl;
	int i;

	if ((adev->mman.buffer_funcs_ring == sdma0) ||
	    (adev->mman.buffer_funcs_ring == sdma1))
		amdgpu_ttm_set_active_vram_size(adev, adev->mc.visible_vram_size);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);
		ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);
	}
	sdma0->ready = false;
	sdma1->ready = false;
}
/**
 * sdma_v3_0_rlc_stop - stop the compute async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the compute async dma queues (VI).
 */
static void sdma_v3_0_rlc_stop(struct amdgpu_device *adev)
{
	/* XXX todo */
}
/**
 * sdma_v3_0_ctx_switch_enable - stop the async dma engines context switch
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs context switch.
 *
 * Halt or unhalt the async dma engines context switch (VI).
 */
static void sdma_v3_0_ctx_switch_enable(struct amdgpu_device *adev, bool enable)
{
	u32 f32_cntl;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		f32_cntl = RREG32(mmSDMA0_CNTL + sdma_offsets[i]);
		if (enable)
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
					AUTO_CTXSW_ENABLE, 1);
		else
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL,
					AUTO_CTXSW_ENABLE, 0);
		WREG32(mmSDMA0_CNTL + sdma_offsets[i], f32_cntl);
	}
}
/**
 * sdma_v3_0_enable - stop the async dma engines
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable the DMA MEs.
 *
 * Halt or unhalt the async dma engines (VI).
 */
static void sdma_v3_0_enable(struct amdgpu_device *adev, bool enable)
{
	u32 f32_cntl;
	int i;

	if (enable == false) {
		sdma_v3_0_gfx_stop(adev);
		sdma_v3_0_rlc_stop(adev);
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		f32_cntl = RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]);
		if (enable)
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 0);
		else
			f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1);
		WREG32(mmSDMA0_F32_CNTL + sdma_offsets[i], f32_cntl);
	}
}
/**
 * sdma_v3_0_gfx_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the gfx DMA ring buffers and enable them (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 rb_cntl, ib_cntl;
	u32 rb_bufsz;
	u32 wb_offset;
	u32 doorbell;
	int i, j, r;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		wb_offset = (ring->rptr_offs * 4);

		mutex_lock(&adev->srbm_mutex);
		for (j = 0; j < 16; j++) {
			vi_srbm_select(adev, 0, 0, 0, j);
			/* SDMA GFX */
			WREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i], 0);
			WREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i], 0);
		}
		vi_srbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);

		WREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i], 0);

		/* Set ring buffer size in dwords */
		rb_bufsz = order_base_2(ring->ring_size / 4);
		rb_cntl = RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SIZE, rb_bufsz);
#ifdef __BIG_ENDIAN
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_SWAP_ENABLE, 1);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL,
					RPTR_WRITEBACK_SWAP_ENABLE, 1);
#endif
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);

		/* Initialize the ring buffer's read and write pointers */
		WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0);
		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0);

		/* set the wb address whether it's enabled or not */
		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i],
		       upper_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFF);
		WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i],
		       lower_32_bits(adev->wb.gpu_addr + wb_offset) & 0xFFFFFFFC);

		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RPTR_WRITEBACK_ENABLE, 1);

		WREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i], ring->gpu_addr >> 8);
		WREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i], ring->gpu_addr >> 40);

		ring->wptr = 0;
		WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], ring->wptr << 2);

		doorbell = RREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i]);

		if (ring->use_doorbell) {
			doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL,
						 OFFSET, ring->doorbell_index);
			doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 1);
		} else {
			doorbell = REG_SET_FIELD(doorbell, SDMA0_GFX_DOORBELL, ENABLE, 0);
		}
		WREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i], doorbell);

		/* enable DMA RB */
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 1);
		WREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i], rb_cntl);

		ib_cntl = RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]);
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 1);
#ifdef __BIG_ENDIAN
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_SWAP_ENABLE, 1);
#endif
		/* enable DMA IBs */
		WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl);

		ring->ready = true;

		r = amdgpu_ring_test_ring(ring);
		if (r) {
			ring->ready = false;
			return r;
		}

		if (adev->mman.buffer_funcs_ring == ring)
			amdgpu_ttm_set_active_vram_size(adev, adev->mc.real_vram_size);
	}

	return 0;
}
/**
 * sdma_v3_0_rlc_resume - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the compute DMA queues and enable them (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v3_0_rlc_resume(struct amdgpu_device *adev)
{
	/* XXX todo */
	return 0;
}
/**
 * sdma_v3_0_load_microcode - load the sDMA ME ucode
 *
 * @adev: amdgpu_device pointer
 *
 * Loads the sDMA0/1 ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int sdma_v3_0_load_microcode(struct amdgpu_device *adev)
{
	const struct sdma_firmware_header_v1_0 *hdr;
	const __le32 *fw_data;
	u32 fw_size;
	int i, j;

	/* halt the MEs */
	sdma_v3_0_enable(adev, false);

	for (i = 0; i < adev->sdma.num_instances; i++) {
		if (!adev->sdma.instance[i].fw)
			return -EINVAL;
		hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma.instance[i].fw->data;
		amdgpu_ucode_print_sdma_hdr(&hdr->header);
		fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
		fw_data = (const __le32 *)
			(adev->sdma.instance[i].fw->data +
				le32_to_cpu(hdr->header.ucode_array_offset_bytes));
		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
		for (j = 0; j < fw_size; j++)
			WREG32(mmSDMA0_UCODE_DATA + sdma_offsets[i], le32_to_cpup(fw_data++));
		WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], adev->sdma.instance[i].fw_version);
	}

	return 0;
}
/**
 * sdma_v3_0_start - setup and start the async dma engines
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the DMA engines and enable them (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v3_0_start(struct amdgpu_device *adev)
{
	int r, i;

	if (!adev->pp_enabled) {
		if (!adev->firmware.smu_load) {
			r = sdma_v3_0_load_microcode(adev);
			if (r)
				return r;
		} else {
			for (i = 0; i < adev->sdma.num_instances; i++) {
				r = adev->smu.smumgr_funcs->check_fw_load_finish(adev,
										 (i == 0) ?
										 AMDGPU_UCODE_ID_SDMA0 :
										 AMDGPU_UCODE_ID_SDMA1);
				if (r)
					return -EINVAL;
			}
		}
	}

	/* unhalt the MEs */
	sdma_v3_0_enable(adev, true);
	/* enable sdma ring preemption */
	sdma_v3_0_ctx_switch_enable(adev, true);

	/* start the gfx rings and rlc compute queues */
	r = sdma_v3_0_gfx_resume(adev);
	if (r)
		return r;
	r = sdma_v3_0_rlc_resume(adev);
	if (r)
		return r;

	return 0;
}
/**
 * sdma_v3_0_ring_test_ring - simple async dma engine test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test the DMA engine by using it to write a value to memory (VI).
 * Returns 0 for success, error for failure.
 */
static int sdma_v3_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp;
	u64 gpu_addr;

	r = amdgpu_wb_get(adev, &index);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
		return r;
	}

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);

	r = amdgpu_ring_alloc(ring, 5);
	if (r) {
		DRM_ERROR("amdgpu: dma failed to lock ring %d (%d).\n", ring->idx, r);
		amdgpu_wb_free(adev, index);
		return r;
	}

	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
			  SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR));
	amdgpu_ring_write(ring, lower_32_bits(gpu_addr));
	amdgpu_ring_write(ring, upper_32_bits(gpu_addr));
	amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = le32_to_cpu(adev->wb.wb[index]);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}

	if (i < adev->usec_timeout) {
		DRM_INFO("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (0x%08X)\n",
			  ring->idx, tmp);
		r = -EINVAL;
	}
	amdgpu_wb_free(adev, index);

	return r;
}
/**
 * sdma_v3_0_ring_test_ib - test an IB on the DMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Test a simple IB in the DMA ring (VI).
 * Returns 0 on success, error on failure.
 */
static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct fence *f = NULL;
	unsigned i;
	unsigned index;
	int r;
	u32 tmp = 0;
	u64 gpu_addr;

	r = amdgpu_wb_get(adev, &index);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate wb slot\n", r);
		return r;
	}

	gpu_addr = adev->wb.gpu_addr + (index * 4);
	tmp = 0xCAFEDEAD;
	adev->wb.wb[index] = cpu_to_le32(tmp);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(ring, NULL, 256, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%d).\n", r);
		goto err0;
	}

	ib.ptr[0] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR);
	ib.ptr[1] = lower_32_bits(gpu_addr);
	ib.ptr[2] = upper_32_bits(gpu_addr);
	ib.ptr[3] = SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1);
	ib.ptr[4] = 0xDEADBEEF;
	ib.ptr[5] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
	ib.ptr[6] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
	ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
	ib.length_dw = 8;

	r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, &ib, 1, NULL,
						 AMDGPU_FENCE_OWNER_UNDEFINED,
						 &f);
	if (r)
		goto err1;

	r = fence_wait(f, false);
	if (r) {
		DRM_ERROR("amdgpu: fence wait failed (%d).\n", r);
		goto err1;
	}
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = le32_to_cpu(adev->wb.wb[index]);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < adev->usec_timeout) {
		DRM_INFO("ib test on ring %d succeeded in %u usecs\n",
			 ring->idx, i);
		goto err1;
	} else {
		DRM_ERROR("amdgpu: ib test failed (0x%08X)\n", tmp);
		r = -EINVAL;
	}
err1:
	fence_put(f);
	amdgpu_ib_free(adev, &ib);
err0:
	amdgpu_wb_free(adev, index);
	return r;
}
/**
 * sdma_v3_0_vm_copy_pte - update PTEs by copying them from the GART
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @src: src addr to copy from
 * @count: number of page entries to update
 *
 * Update PTEs by copying them from the GART using sDMA (VI).
 */
static void sdma_v3_0_vm_copy_pte(struct amdgpu_ib *ib,
				  uint64_t pe, uint64_t src,
				  unsigned count)
{
	while (count) {
		unsigned bytes = count * 8;
		if (bytes > 0x1FFFF8)
			bytes = 0x1FFFF8;

		ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
			SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
		ib->ptr[ib->length_dw++] = bytes;
		ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
		ib->ptr[ib->length_dw++] = lower_32_bits(src);
		ib->ptr[ib->length_dw++] = upper_32_bits(src);
		ib->ptr[ib->length_dw++] = lower_32_bits(pe);
		ib->ptr[ib->length_dw++] = upper_32_bits(pe);

		pe += bytes;
		src += bytes;
		count -= bytes / 8;
	}
}
/**
 * sdma_v3_0_vm_write_pte - update PTEs by writing them manually
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update PTEs by writing them manually using sDMA (VI).
 */
static void sdma_v3_0_vm_write_pte(struct amdgpu_ib *ib,
				   const dma_addr_t *pages_addr, uint64_t pe,
				   uint64_t addr, unsigned count,
				   uint32_t incr, uint32_t flags)
{
	uint64_t value;
	unsigned ndw;

	while (count) {
		ndw = count * 2;
		if (ndw > 0xFFFFE)
			ndw = 0xFFFFE;

		/* for non-physically contiguous pages (system) */
		ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) |
			SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
		ib->ptr[ib->length_dw++] = pe;
		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
		ib->ptr[ib->length_dw++] = ndw;
		for (; ndw > 0; ndw -= 2, --count, pe += 8) {
			value = amdgpu_vm_map_gart(pages_addr, addr);
			addr += incr;
			value |= flags;
			ib->ptr[ib->length_dw++] = value;
			ib->ptr[ib->length_dw++] = upper_32_bits(value);
		}
	}
}
/**
 * sdma_v3_0_vm_set_pte_pde - update the page tables using sDMA
 *
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: access flags
 *
 * Update the page tables using sDMA (VI).
 */
static void sdma_v3_0_vm_set_pte_pde(struct amdgpu_ib *ib,
				     uint64_t pe,
				     uint64_t addr, unsigned count,
				     uint32_t incr, uint32_t flags)
{
	uint64_t value;
	unsigned ndw;

	while (count) {
		ndw = count;
		if (ndw > 0x7FFFF)
			ndw = 0x7FFFF;

		if (flags & AMDGPU_PTE_VALID)
			value = addr;
		else
			value = 0;

		/* for physically contiguous pages (vram) */
		ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_GEN_PTEPDE);
		ib->ptr[ib->length_dw++] = pe; /* dst addr */
		ib->ptr[ib->length_dw++] = upper_32_bits(pe);
		ib->ptr[ib->length_dw++] = flags; /* mask */
		ib->ptr[ib->length_dw++] = 0;
		ib->ptr[ib->length_dw++] = value; /* value */
		ib->ptr[ib->length_dw++] = upper_32_bits(value);
		ib->ptr[ib->length_dw++] = incr; /* increment size */
		ib->ptr[ib->length_dw++] = 0;
		ib->ptr[ib->length_dw++] = ndw; /* number of entries */

		pe += ndw * 8;
		addr += ndw * incr;
		count -= ndw;
	}
}
/**
 * sdma_v3_0_ring_pad_ib - pad the IB to the required number of dw
 *
 * @ib: indirect buffer to fill with padding
 *
 */
static void sdma_v3_0_ring_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib)
{
	struct amdgpu_sdma_instance *sdma = amdgpu_get_sdma_instance(ring);
	u32 pad_count;
	int i;

	pad_count = (8 - (ib->length_dw & 0x7)) % 8;
	for (i = 0; i < pad_count; i++)
		if (sdma && sdma->burst_nop && (i == 0))
			ib->ptr[ib->length_dw++] =
				SDMA_PKT_HEADER_OP(SDMA_OP_NOP) |
				SDMA_PKT_NOP_HEADER_COUNT(pad_count - 1);
		else
			ib->ptr[ib->length_dw++] =
				SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
}
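
/*
 * A worked example of the padding above: an IB with length_dw == 13
 * gets pad_count = (8 - (13 & 0x7)) % 8 = 3, bringing it to 16 dwords,
 * while an IB already at a multiple of 8 dwords gets no padding at all.
 */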
/**
 * sdma_v3_0_ring_emit_vm_flush - vm flush using sDMA
 *
 * @ring: amdgpu_ring pointer
 * @vm: amdgpu_vm pointer
 *
 * Update the page table base and flush the VM TLB
 * using sDMA (VI).
 */
static void sdma_v3_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					 unsigned vm_id, uint64_t pd_addr)
{
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
	if (vm_id < 8) {
		amdgpu_ring_write(ring, (mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vm_id));
	} else {
		amdgpu_ring_write(ring, (mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vm_id - 8));
	}
	amdgpu_ring_write(ring, pd_addr >> 12);

	/* flush TLB */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_SRBM_WRITE) |
			  SDMA_PKT_SRBM_WRITE_HEADER_BYTE_EN(0xf));
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
	amdgpu_ring_write(ring, 1 << vm_id);

	/* wait for flush */
	amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) |
			  SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(0) |
			  SDMA_PKT_POLL_REGMEM_HEADER_FUNC(0)); /* always */
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST << 2);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0); /* reference */
	amdgpu_ring_write(ring, 0); /* mask */
	amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
			  SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(10)); /* retry count, poll interval */
}
static int sdma_v3_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_STONEY:
		adev->sdma.num_instances = 1;
		break;
	default:
		adev->sdma.num_instances = SDMA_MAX_INSTANCE;
		break;
	}

	sdma_v3_0_set_ring_funcs(adev);
	sdma_v3_0_set_buffer_funcs(adev);
	sdma_v3_0_set_vm_pte_funcs(adev);
	sdma_v3_0_set_irq_funcs(adev);

	return 0;
}
static int sdma_v3_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* SDMA trap event */
	r = amdgpu_irq_add_id(adev, 224, &adev->sdma.trap_irq);
	if (r)
		return r;

	/* SDMA Privileged inst */
	r = amdgpu_irq_add_id(adev, 241, &adev->sdma.illegal_inst_irq);
	if (r)
		return r;

	/* SDMA Privileged inst */
	r = amdgpu_irq_add_id(adev, 247, &adev->sdma.illegal_inst_irq);
	if (r)
		return r;

	r = sdma_v3_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load sdma firmware!\n");
		return r;
	}

	for (i = 0; i < adev->sdma.num_instances; i++) {
		ring = &adev->sdma.instance[i].ring;
		ring->ring_obj = NULL;
		ring->use_doorbell = true;
		ring->doorbell_index = (i == 0) ?
			AMDGPU_DOORBELL_sDMA_ENGINE0 : AMDGPU_DOORBELL_sDMA_ENGINE1;

		sprintf(ring->name, "sdma%d", i);
		r = amdgpu_ring_init(adev, ring, 256 * 1024,
				     SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP), 0xf,
				     &adev->sdma.trap_irq,
				     (i == 0) ?
				     AMDGPU_SDMA_IRQ_TRAP0 : AMDGPU_SDMA_IRQ_TRAP1,
				     AMDGPU_RING_TYPE_SDMA);
		if (r)
			return r;
	}

	return r;
}
static int sdma_v3_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		amdgpu_ring_fini(&adev->sdma.instance[i].ring);

	return 0;
}
static int sdma_v3_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	sdma_v3_0_init_golden_registers(adev);

	r = sdma_v3_0_start(adev);
	if (r)
		return r;

	return r;
}
static int sdma_v3_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	sdma_v3_0_ctx_switch_enable(adev, false);
	sdma_v3_0_enable(adev, false);

	return 0;
}
static int sdma_v3_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return sdma_v3_0_hw_fini(adev);
}

static int sdma_v3_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return sdma_v3_0_hw_init(adev);
}
static bool sdma_v3_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS2);

	if (tmp & (SRBM_STATUS2__SDMA_BUSY_MASK |
		   SRBM_STATUS2__SDMA1_BUSY_MASK))
		return false;

	return true;
}
static int sdma_v3_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(mmSRBM_STATUS2) & (SRBM_STATUS2__SDMA_BUSY_MASK |
				SRBM_STATUS2__SDMA1_BUSY_MASK);

		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}
static void sdma_v3_0_print_status(void *handle)
{
	int i, j;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	dev_info(adev->dev, "VI SDMA registers\n");
	dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
		 RREG32(mmSRBM_STATUS2));
	for (i = 0; i < adev->sdma.num_instances; i++) {
		dev_info(adev->dev, "  SDMA%d_STATUS_REG=0x%08X\n",
			 i, RREG32(mmSDMA0_STATUS_REG + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_F32_CNTL=0x%08X\n",
			 i, RREG32(mmSDMA0_F32_CNTL + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_CNTL=0x%08X\n",
			 i, RREG32(mmSDMA0_CNTL + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_SEM_WAIT_FAIL_TIMER_CNTL=0x%08X\n",
			 i, RREG32(mmSDMA0_SEM_WAIT_FAIL_TIMER_CNTL + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_IB_CNTL=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_CNTL=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_CNTL + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_WPTR=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR_ADDR_HI=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_RPTR_ADDR_LO=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_RPTR_ADDR_LO + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_BASE=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_BASE + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_RB_BASE_HI=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_RB_BASE_HI + sdma_offsets[i]));
		dev_info(adev->dev, "  SDMA%d_GFX_DOORBELL=0x%08X\n",
			 i, RREG32(mmSDMA0_GFX_DOORBELL + sdma_offsets[i]));
		mutex_lock(&adev->srbm_mutex);
		for (j = 0; j < 16; j++) {
			vi_srbm_select(adev, 0, 0, 0, j);
			dev_info(adev->dev, "  VM %d:\n", j);
			dev_info(adev->dev, "  SDMA%d_GFX_VIRTUAL_ADDR=0x%08X\n",
				 i, RREG32(mmSDMA0_GFX_VIRTUAL_ADDR + sdma_offsets[i]));
			dev_info(adev->dev, "  SDMA%d_GFX_APE1_CNTL=0x%08X\n",
				 i, RREG32(mmSDMA0_GFX_APE1_CNTL + sdma_offsets[i]));
		}
		vi_srbm_select(adev, 0, 0, 0, 0);
		mutex_unlock(&adev->srbm_mutex);
	}
}
static int sdma_v3_0_soft_reset(void *handle)
{
	u32 srbm_soft_reset = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS2);

	if (tmp & SRBM_STATUS2__SDMA_BUSY_MASK) {
		/* sdma0 */
		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET);
		tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0);
		WREG32(mmSDMA0_F32_CNTL + SDMA0_REGISTER_OFFSET, tmp);
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA_MASK;
	}
	if (tmp & SRBM_STATUS2__SDMA1_BUSY_MASK) {
		/* sdma1 */
		tmp = RREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET);
		tmp = REG_SET_FIELD(tmp, SDMA0_F32_CNTL, HALT, 0);
		WREG32(mmSDMA0_F32_CNTL + SDMA1_REGISTER_OFFSET, tmp);
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_SDMA1_MASK;
	}

	if (srbm_soft_reset) {
		sdma_v3_0_print_status((void *)adev);

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);

		sdma_v3_0_print_status((void *)adev);
	}

	return 0;
}
static int sdma_v3_0_set_trap_irq_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	u32 sdma_cntl;

	switch (type) {
	case AMDGPU_SDMA_IRQ_TRAP0:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0);
			WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET);
			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1);
			WREG32(mmSDMA0_CNTL + SDMA0_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	case AMDGPU_SDMA_IRQ_TRAP1:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 0);
			WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			sdma_cntl = RREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET);
			sdma_cntl = REG_SET_FIELD(sdma_cntl, SDMA0_CNTL, TRAP_ENABLE, 1);
			WREG32(mmSDMA0_CNTL + SDMA1_REGISTER_OFFSET, sdma_cntl);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
	return 0;
}
static int sdma_v3_0_process_trap_irq(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u8 instance_id, queue_id;

	instance_id = (entry->ring_id & 0x3) >> 0;
	queue_id = (entry->ring_id & 0xc) >> 2;
	DRM_DEBUG("IH: SDMA trap\n");
	switch (instance_id) {
	case 0:
		switch (queue_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[0].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		}
		break;
	case 1:
		switch (queue_id) {
		case 0:
			amdgpu_fence_process(&adev->sdma.instance[1].ring);
			break;
		case 1:
			/* XXX compute */
			break;
		case 2:
			/* XXX compute */
			break;
		}
		break;
	}
	return 0;
}
static int sdma_v3_0_process_illegal_inst_irq(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *source,
					      struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal instruction in SDMA command stream\n");
	schedule_work(&adev->reset_work);
	return 0;
}
static void fiji_update_sdma_medium_grain_clock_gating(
		struct amdgpu_device *adev,
		bool enable)
{
	uint32_t temp, data;

	if (enable) {
		temp = data = RREG32(mmSDMA0_CLK_CTRL);
		data &= ~(SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK);
		if (data != temp)
			WREG32(mmSDMA0_CLK_CTRL, data);

		temp = data = RREG32(mmSDMA1_CLK_CTRL);
		data &= ~(SDMA1_CLK_CTRL__SOFT_OVERRIDE7_MASK |
				SDMA1_CLK_CTRL__SOFT_OVERRIDE6_MASK |
				SDMA1_CLK_CTRL__SOFT_OVERRIDE5_MASK |
				SDMA1_CLK_CTRL__SOFT_OVERRIDE4_MASK |
				SDMA1_CLK_CTRL__SOFT_OVERRIDE3_MASK |
				SDMA1_CLK_CTRL__SOFT_OVERRIDE2_MASK |
				SDMA1_CLK_CTRL__SOFT_OVERRIDE1_MASK |
				SDMA1_CLK_CTRL__SOFT_OVERRIDE0_MASK);
		if (data != temp)
			WREG32(mmSDMA1_CLK_CTRL, data);
	} else {
		temp = data = RREG32(mmSDMA0_CLK_CTRL);
		data |= SDMA0_CLK_CTRL__SOFT_OVERRIDE7_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE6_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE5_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE4_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE3_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE2_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE1_MASK |
				SDMA0_CLK_CTRL__SOFT_OVERRIDE0_MASK;
		if (data != temp)
			WREG32(mmSDMA0_CLK_CTRL, data);

		temp = data = RREG32(mmSDMA1_CLK_CTRL);
		data |= SDMA1_CLK_CTRL__SOFT_OVERRIDE7_MASK |
				SDMA1_CLK_CTRL__SOFT_OVERRIDE6_MASK |
				SDMA1_CLK_CTRL__SOFT_OVERRIDE5_MASK |
				SDMA1_CLK_CTRL__SOFT_OVERRIDE4_MASK |
				SDMA1_CLK_CTRL__SOFT_OVERRIDE3_MASK |
				SDMA1_CLK_CTRL__SOFT_OVERRIDE2_MASK |
				SDMA1_CLK_CTRL__SOFT_OVERRIDE1_MASK |
				SDMA1_CLK_CTRL__SOFT_OVERRIDE0_MASK;
		if (data != temp)
			WREG32(mmSDMA1_CLK_CTRL, data);
	}
}
static void fiji_update_sdma_medium_grain_light_sleep(
		struct amdgpu_device *adev,
		bool enable)
{
	uint32_t temp, data;

	if (enable) {
		temp = data = RREG32(mmSDMA0_POWER_CNTL);
		data |= SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
		if (temp != data)
			WREG32(mmSDMA0_POWER_CNTL, data);

		temp = data = RREG32(mmSDMA1_POWER_CNTL);
		data |= SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
		if (temp != data)
			WREG32(mmSDMA1_POWER_CNTL, data);
	} else {
		temp = data = RREG32(mmSDMA0_POWER_CNTL);
		data &= ~SDMA0_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
		if (temp != data)
			WREG32(mmSDMA0_POWER_CNTL, data);

		temp = data = RREG32(mmSDMA1_POWER_CNTL);
		data &= ~SDMA1_POWER_CNTL__MEM_POWER_OVERRIDE_MASK;
		if (temp != data)
			WREG32(mmSDMA1_POWER_CNTL, data);
	}
}
static int sdma_v3_0_set_clockgating_state(void *handle,
					   enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		fiji_update_sdma_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		fiji_update_sdma_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE ? true : false);
		break;
	default:
		break;
	}
	return 0;
}
static int sdma_v3_0_set_powergating_state(void *handle,
					   enum amd_powergating_state state)
{
	return 0;
}
= {
1517 .early_init
= sdma_v3_0_early_init
,
1519 .sw_init
= sdma_v3_0_sw_init
,
1520 .sw_fini
= sdma_v3_0_sw_fini
,
1521 .hw_init
= sdma_v3_0_hw_init
,
1522 .hw_fini
= sdma_v3_0_hw_fini
,
1523 .suspend
= sdma_v3_0_suspend
,
1524 .resume
= sdma_v3_0_resume
,
1525 .is_idle
= sdma_v3_0_is_idle
,
1526 .wait_for_idle
= sdma_v3_0_wait_for_idle
,
1527 .soft_reset
= sdma_v3_0_soft_reset
,
1528 .print_status
= sdma_v3_0_print_status
,
1529 .set_clockgating_state
= sdma_v3_0_set_clockgating_state
,
1530 .set_powergating_state
= sdma_v3_0_set_powergating_state
,
static const struct amdgpu_ring_funcs sdma_v3_0_ring_funcs = {
	.get_rptr = sdma_v3_0_ring_get_rptr,
	.get_wptr = sdma_v3_0_ring_get_wptr,
	.set_wptr = sdma_v3_0_ring_set_wptr,
	.parse_cs = NULL,
	.emit_ib = sdma_v3_0_ring_emit_ib,
	.emit_fence = sdma_v3_0_ring_emit_fence,
	.emit_vm_flush = sdma_v3_0_ring_emit_vm_flush,
	.emit_hdp_flush = sdma_v3_0_ring_emit_hdp_flush,
	.test_ring = sdma_v3_0_ring_test_ring,
	.test_ib = sdma_v3_0_ring_test_ib,
	.insert_nop = sdma_v3_0_ring_insert_nop,
	.pad_ib = sdma_v3_0_ring_pad_ib,
};
static void sdma_v3_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++)
		adev->sdma.instance[i].ring.funcs = &sdma_v3_0_ring_funcs;
}
static const struct amdgpu_irq_src_funcs sdma_v3_0_trap_irq_funcs = {
	.set = sdma_v3_0_set_trap_irq_state,
	.process = sdma_v3_0_process_trap_irq,
};

static const struct amdgpu_irq_src_funcs sdma_v3_0_illegal_inst_irq_funcs = {
	.process = sdma_v3_0_process_illegal_inst_irq,
};
static void sdma_v3_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->sdma.trap_irq.num_types = AMDGPU_SDMA_IRQ_LAST;
	adev->sdma.trap_irq.funcs = &sdma_v3_0_trap_irq_funcs;
	adev->sdma.illegal_inst_irq.funcs = &sdma_v3_0_illegal_inst_irq_funcs;
}
/**
 * sdma_v3_0_emit_copy_buffer - copy buffer using the sDMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 * @src_offset: src GPU address
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Copy GPU buffers using the DMA engine (VI).
 * Used by the amdgpu ttm implementation to move pages if
 * registered as the asic copy callback.
 */
static void sdma_v3_0_emit_copy_buffer(struct amdgpu_ib *ib,
				       uint64_t src_offset,
				       uint64_t dst_offset,
				       uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_COPY) |
		SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR);
	ib->ptr[ib->length_dw++] = byte_count;
	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
	ib->ptr[ib->length_dw++] = lower_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(src_offset);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
}
/**
 * sdma_v3_0_emit_fill_buffer - fill buffer using the sDMA engine
 *
 * @ring: amdgpu_ring structure holding ring information
 * @src_data: value to write to buffer
 * @dst_offset: dst GPU address
 * @byte_count: number of bytes to xfer
 *
 * Fill GPU buffers using the DMA engine (VI).
 */
static void sdma_v3_0_emit_fill_buffer(struct amdgpu_ib *ib,
				       uint32_t src_data,
				       uint64_t dst_offset,
				       uint32_t byte_count)
{
	ib->ptr[ib->length_dw++] = SDMA_PKT_HEADER_OP(SDMA_OP_CONST_FILL);
	ib->ptr[ib->length_dw++] = lower_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = upper_32_bits(dst_offset);
	ib->ptr[ib->length_dw++] = src_data;
	ib->ptr[ib->length_dw++] = byte_count;
}
static const struct amdgpu_buffer_funcs sdma_v3_0_buffer_funcs = {
	.copy_max_bytes = 0x1fffff,
	.copy_num_dw = 7,
	.emit_copy_buffer = sdma_v3_0_emit_copy_buffer,

	.fill_max_bytes = 0x1fffff,
	.fill_num_dw = 5,
	.emit_fill_buffer = sdma_v3_0_emit_fill_buffer,
};
static void sdma_v3_0_set_buffer_funcs(struct amdgpu_device *adev)
{
	if (adev->mman.buffer_funcs == NULL) {
		adev->mman.buffer_funcs = &sdma_v3_0_buffer_funcs;
		adev->mman.buffer_funcs_ring = &adev->sdma.instance[0].ring;
	}
}
static const struct amdgpu_vm_pte_funcs sdma_v3_0_vm_pte_funcs = {
	.copy_pte = sdma_v3_0_vm_copy_pte,
	.write_pte = sdma_v3_0_vm_write_pte,
	.set_pte_pde = sdma_v3_0_vm_set_pte_pde,
};

static void sdma_v3_0_set_vm_pte_funcs(struct amdgpu_device *adev)
{
	if (adev->vm_manager.vm_pte_funcs == NULL) {
		adev->vm_manager.vm_pte_funcs = &sdma_v3_0_vm_pte_funcs;
		adev->vm_manager.vm_pte_funcs_ring = &adev->sdma.instance[0].ring;
		adev->vm_manager.vm_pte_funcs_ring->is_pte_ring = true;
	}
}