/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
#include "gmc_v8_0.h"
#include "amdgpu_ucode.h"

#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"

#include "bif/bif_5_0_d.h"
#include "bif/bif_5_0_sh_mask.h"

#include "oss/oss_3_0_d.h"
#include "oss/oss_3_0_sh_mask.h"

#include "vid.h"
#include "vi.h"
static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev);
static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev);

MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
static const u32 golden_settings_tonga_a11[] =
{
	mmMC_ARB_WTM_GRPWT_RD, 0x00000003, 0x00000000,
	mmMC_HUB_RDREQ_DMIF_LIMIT, 0x0000007f, 0x00000028,
	mmMC_HUB_WDP_UMC, 0x00007fb6, 0x00000991,
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};

static const u32 tonga_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 golden_settings_fiji_a10[] =
{
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff,
};

static const u32 fiji_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 cz_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static const u32 stoney_mgcg_cgcg_init[] =
{
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};
static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_FIJI:
		amdgpu_program_register_sequence(adev,
						 fiji_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(fiji_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 golden_settings_fiji_a10,
						 (const u32)ARRAY_SIZE(golden_settings_fiji_a10));
		break;
	case CHIP_TONGA:
		amdgpu_program_register_sequence(adev,
						 tonga_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(tonga_mgcg_cgcg_init));
		amdgpu_program_register_sequence(adev,
						 golden_settings_tonga_a11,
						 (const u32)ARRAY_SIZE(golden_settings_tonga_a11));
		break;
	case CHIP_CARRIZO:
		amdgpu_program_register_sequence(adev,
						 cz_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(cz_mgcg_cgcg_init));
		break;
	case CHIP_STONEY:
		amdgpu_program_register_sequence(adev,
						 stoney_mgcg_cgcg_init,
						 (const u32)ARRAY_SIZE(stoney_mgcg_cgcg_init));
		break;
	default:
		break;
	}
}
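/*
 * Illustrative sketch, not part of this file: each golden-settings table
 * above is a flat list of {register, AND mask, OR value} triples consumed by
 * amdgpu_program_register_sequence() (in amdgpu_device.c). A minimal model of
 * that consumer, assuming the usual read-modify-write semantics; treat the
 * internals below as an assumption for illustration:
 *
 *	for (i = 0; i < array_size; i += 3) {
 *		u32 reg = registers[i + 0];
 *		u32 and_mask = registers[i + 1];
 *		u32 or_mask = registers[i + 2];
 *		u32 tmp = (and_mask == 0xffffffff) ? or_mask :
 *			  ((RREG32(reg) & ~and_mask) | or_mask);
 *		WREG32(reg, tmp);
 *	}
 */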
/**
 * gmc_v8_0_mc_wait_for_idle - wait for MC idle callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Wait for the MC (memory controller) to be idle.
 * Returns 0 if the MC is idle, -1 if not.
 */
int gmc_v8_0_mc_wait_for_idle(struct amdgpu_device *adev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__VMC_BUSY_MASK |
					       SRBM_STATUS__MCB_BUSY_MASK |
					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
					       SRBM_STATUS__MCC_BUSY_MASK |
					       SRBM_STATUS__MCD_BUSY_MASK |
					       SRBM_STATUS__VMC1_BUSY_MASK);
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}
void gmc_v8_0_mc_stop(struct amdgpu_device *adev,
		      struct amdgpu_mode_mc_save *save)
{
	u32 blackout;

	if (adev->mode_info.num_crtc)
		amdgpu_display_stop_mc_access(adev, save);

	amdgpu_asic_wait_for_mc_idle(adev);

	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
		/* Block CPU access */
		WREG32(mmBIF_FB_EN, 0);
		/* blackout the MC */
		blackout = REG_SET_FIELD(blackout,
					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 1);
		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
	}
	/* wait for the MC to settle */
	udelay(100);
}
void gmc_v8_0_mc_resume(struct amdgpu_device *adev,
			struct amdgpu_mode_mc_save *save)
{
	u32 tmp;

	/* unblackout the MC */
	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
	/* allow CPU access */
	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
	WREG32(mmBIF_FB_EN, tmp);

	if (adev->mode_info.num_crtc)
		amdgpu_display_resume_mc_access(adev, save);
}
/**
 * gmc_v8_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_TONGA:
		chip_name = "tonga";
		break;
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
		return 0;
	default: BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
	err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->mc.fw);

out:
	if (err) {
		printk(KERN_ERR
		       "mc: Failed to load firmware \"%s\"\n",
		       fw_name);
		release_firmware(adev->mc.fw);
		adev->mc.fw = NULL;
	}
	return err;
}
/**
 * gmc_v8_0_mc_load_microcode - load MC ucode into the hw
 *
 * @adev: amdgpu_device pointer
 *
 * Load the GDDR MC ucode into the hw (VI).
 * Returns 0 on success, error on failure.
 */
static int gmc_v8_0_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 running, blackout = 0;
	int i, ucode_size, regs_size;

	if (!adev->mc.fw)
		return -EINVAL;

	/* Skip MC ucode loading on SR-IOV capable boards.
	 * vbios does this for us in asic_init in that case.
	 */
	if (adev->virtualization.supports_sr_iov)
		return 0;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);

	if (running == 0) {
		if (running) {
			blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
			WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
		}

		/* reset the engine and set to writable */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
		}
		/* load the MC ucode */
		for (i = 0; i < ucode_size; i++)
			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
				break;
			udelay(1);
		}
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
				break;
			udelay(1);
		}

		if (running)
			WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
	}

	return 0;
}
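/*
 * Note (added for clarity, not in the original file): the MC firmware image
 * parsed above is a common ucode header followed by two payloads. The
 * io-debug section is a list of (MC_SEQ_IO_DEBUG_INDEX, MC_SEQ_IO_DEBUG_DATA)
 * register pairs, which is why regs_size divides io_debug_size_bytes by
 * 4 * 2 (two 32-bit words per entry), while the ucode section is plain
 * 32-bit words streamed into MC_SEQ_SUP_PGM (ucode_size_bytes / 4).
 */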
static void gmc_v8_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_mc *mc)
{
	if (mc->mc_vram_size > 0xFFC0000000ULL) {
		/* leave room for at least 1024M GTT */
		dev_warn(adev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xFFC0000000ULL;
		mc->mc_vram_size = 0xFFC0000000ULL;
	}
	amdgpu_vram_location(adev, &adev->mc, 0);
	adev->mc.gtt_base_align = 0;
	amdgpu_gtt_location(adev, mc);
}
/**
 * gmc_v8_0_mc_program - program the GPU memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Set the location of vram, gart, and AGP in the GPU's
 * physical address space (VI).
 */
static void gmc_v8_0_mc_program(struct amdgpu_device *adev)
{
	struct amdgpu_mode_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		WREG32((0xb05 + j), 0x00000000);
		WREG32((0xb06 + j), 0x00000000);
		WREG32((0xb07 + j), 0x00000000);
		WREG32((0xb08 + j), 0x00000000);
		WREG32((0xb09 + j), 0x00000000);
	}
	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);

	if (adev->mode_info.num_crtc)
		amdgpu_display_set_vga_render_state(adev, false);

	gmc_v8_0_mc_stop(adev, &save);
	if (amdgpu_asic_wait_for_mc_idle(adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out !\n");
	}
	/* Update configuration */
	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       adev->mc.vram_start >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       adev->mc.vram_end >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       adev->vram_scratch.gpu_addr >> 12);
	tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(mmMC_VM_FB_LOCATION, tmp);
	/* XXX double check these! */
	WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
	WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
	WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	WREG32(mmMC_VM_AGP_BASE, 0);
	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
	if (amdgpu_asic_wait_for_mc_idle(adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out !\n");
	}
	gmc_v8_0_mc_resume(adev, &save);

	WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);

	tmp = RREG32(mmHDP_MISC_CNTL);
	tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0);
	WREG32(mmHDP_MISC_CNTL, tmp);

	tmp = RREG32(mmHDP_HOST_PATH_CNTL);
	WREG32(mmHDP_HOST_PATH_CNTL, tmp);
}
/**
 * gmc_v8_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space (VI).
 * Returns 0 for success.
 */
static int gmc_v8_0_mc_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM information */
	tmp = RREG32(mmMC_ARB_RAMCFG);
	if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(mmMC_SHARED_CHMAP);
	switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	case 4:
		numchan = 3;
		break;
	case 5:
		numchan = 6;
		break;
	case 6:
		numchan = 10;
		break;
	case 7:
		numchan = 12;
		break;
	case 8:
		numchan = 16;
		break;
	}
	adev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0 ? */
	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
	/* size in MB */
	adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->mc.visible_vram_size = adev->mc.aper_size;

	/* In case the PCI BAR is larger than the actual amount of vram */
	if (adev->mc.visible_vram_size > adev->mc.real_vram_size)
		adev->mc.visible_vram_size = adev->mc.real_vram_size;

	/* unless the user has overridden it, set the gart
	 * size equal to 1024 MB or vram, whichever is larger.
	 */
	if (amdgpu_gart_size == -1)
		adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size);
	else
		adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;

	gmc_v8_0_vram_gtt_location(adev, &adev->mc);

	return 0;
}
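/*
 * Note (added for clarity, not in the original file): amdgpu_gart_size above
 * is the driver's gart-size module parameter, given in megabytes, with -1
 * meaning "use the default". With the default, a board with 2 GB of VRAM
 * gets a 2 GB GTT (the larger of 1024 MB and the VRAM size); forcing the
 * parameter to e.g. 512 would yield a 512 MB GTT instead.
 */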
/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v8_0_gart_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 *
 * Flush the TLB for the requested page table (VI).
 */
static void gmc_v8_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
					uint32_t vmid)
{
	/* flush hdp cache */
	WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);

	/* bits 0-15 are the VM contexts0-15 */
	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}
/**
 * gmc_v8_0_gart_set_pte_pde - update the page tables using MMIO
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update the page tables using the CPU.
 */
static int gmc_v8_0_gart_set_pte_pde(struct amdgpu_device *adev,
				     void *cpu_pt_addr,
				     uint32_t gpu_page_idx,
				     uint64_t addr,
				     uint32_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

	/*
	 * PTE format on VI:
	 * 63:40 reserved
	 * 39:12 4k physical page base address
	 * 11:7 fragment
	 * 6 write
	 * 5 read
	 * 4 exe
	 * 3 reserved
	 * 2 snooped
	 * 1 system
	 * 0 valid
	 *
	 * PDE format on VI:
	 * 63:59 block fragment size
	 * 58:40 reserved
	 * 39:1 physical base address of PTE
	 * bits 5:1 must be 0.
	 * 0 valid
	 */
	value = addr & 0x000000FFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));

	return 0;
}
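/*
 * Illustrative sketch, not part of this file: composing an entry per the PTE
 * bit layout documented above. The AMDGPU_PTE_* flag macros come from
 * amdgpu.h; treat the exact set used here as an assumption for illustration.
 *
 *	uint32_t flags = AMDGPU_PTE_VALID |	// bit 0
 *			 AMDGPU_PTE_READABLE |	// bit 5
 *			 AMDGPU_PTE_WRITEABLE;	// bit 6
 *	gmc_v8_0_gart_set_pte_pde(adev, cpu_pt_addr, page_idx,
 *				  phys_addr, flags);
 *
 * The function masks phys_addr to bits 39:12 (the 4k page base), ORs in the
 * flags, and writes the 64-bit entry with writeq().
 */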
/**
 * gmc_v8_0_set_fault_enable_default - update VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void gmc_v8_0_set_fault_enable_default(struct amdgpu_device *adev,
					      bool value)
{
	u32 tmp;

	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
}
/**
 * gmc_v8_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 *
 * This sets up the TLBs, programs the page tables for VMID0,
 * sets up the hw for VMIDs 1-15 which are allocated on
 * demand, and sets up the global locations for the LDS, GDS,
 * and GPUVM for FSA64 clients (VI).
 * Returns 0 for success, errors for failure.
 */
static int gmc_v8_0_gart_enable(struct amdgpu_device *adev)
{
	int r, i;
	u32 tmp;

	if (adev->gart.robj == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
	WREG32(mmVM_L2_CNTL, tmp);
	tmp = RREG32(mmVM_L2_CNTL2);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32(mmVM_L2_CNTL2, tmp);
	tmp = RREG32(mmVM_L2_CNTL3);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 4);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 4);
	WREG32(mmVM_L2_CNTL3, tmp);
	/* XXX: set to enable PTE/PDE in system memory */
	tmp = RREG32(mmVM_L2_CNTL4);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PDE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT0_PTE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PDE_REQUEST_SNOOP, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SHARED, 0);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_CONTEXT1_PTE_REQUEST_SNOOP, 0);
	WREG32(mmVM_L2_CNTL4, tmp);
	/* setup context0 */
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->mc.gtt_end >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page.addr >> 12));
	WREG32(mmVM_CONTEXT0_CNTL2, 0);
	tmp = RREG32(mmVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	WREG32(mmVM_CONTEXT0_CNTL, tmp);

	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR, 0);
	WREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR, 0);
	WREG32(mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET, 0);

	/* empty context1-15 */
	/* FIXME start with 4G, once using 2 level pt switch to full
	 * vm size space
	 */
	/* set vm size, must be a multiple of 4 */
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
	for (i = 1; i < 16; i++) {
		if (i < 8)
			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
			       adev->gart.table_addr >> 12);
		else
			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
			       adev->gart.table_addr >> 12);
	}

	/* enable context1-15 */
	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page.addr >> 12));
	WREG32(mmVM_CONTEXT1_CNTL2, 4);
	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
			    amdgpu_vm_block_size - 9);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		gmc_v8_0_set_fault_enable_default(adev, false);
	else
		gmc_v8_0_set_fault_enable_default(adev, true);

	gmc_v8_0_gart_flush_gpu_tlb(adev, 0);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->mc.gtt_size >> 20),
		 (unsigned long long)adev->gart.table_addr);
	adev->gart.ready = true;
	return 0;
}
static int gmc_v8_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.robj) {
		WARN(1, "VI PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	return amdgpu_gart_table_vram_alloc(adev);
}
/**
 * gmc_v8_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables (VI).
 */
static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
{
	u32 tmp;

	/* Disable all tables */
	WREG32(mmVM_CONTEXT0_CNTL, 0);
	WREG32(mmVM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32(mmVM_L2_CNTL, tmp);
	WREG32(mmVM_L2_CNTL2, 0);
	amdgpu_gart_table_vram_unpin(adev);
}
/**
 * gmc_v8_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup (VI).
 */
static void gmc_v8_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
	amdgpu_gart_fini(adev);
}
/*
 * vm
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v8_0_vm_init - vm init callback
 *
 * @adev: amdgpu_device pointer
 *
 * Inits VI specific vm parameters (number of VMs, base of vram for
 * VMIDs 1-15).
 * Returns 0 for success.
 */
static int gmc_v8_0_vm_init(struct amdgpu_device *adev)
{
	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.num_ids = AMDGPU_NUM_OF_VMIDS;
	amdgpu_vm_manager_init(adev);

	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU) {
		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);

		tmp <<= 22;
		adev->vm_manager.vram_base_offset = tmp;
	} else {
		adev->vm_manager.vram_base_offset = 0;
	}

	return 0;
}
/**
 * gmc_v8_0_vm_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down any asic specific VM setup (VI).
 */
static void gmc_v8_0_vm_fini(struct amdgpu_device *adev)
{
}
/**
 * gmc_v8_0_vm_decode_fault - print human readable fault info
 *
 * @adev: amdgpu_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
 *
 * Print human readable fault information (VI).
 */
static void gmc_v8_0_vm_decode_fault(struct amdgpu_device *adev,
				     u32 status, u32 addr, u32 mc_client)
{
	u32 mc_id;
	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);
	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };

	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_ID);

	printk("VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
	       protections, vmid, addr,
	       REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			     MEMORY_CLIENT_RW) ?
	       "write" : "read", block, mc_client, mc_id);
}
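/*
 * Note (added for clarity, not in the original file): mc_client is four
 * packed ASCII bytes naming the offending memory client, which is why it is
 * unpacked into the block[] string above. A fault line printed by this
 * function looks like (all values illustrative only):
 *
 *   VM fault (0x0c, vmid 1) at page 4096, read from 'TC0 ' (0x54433020) (72)
 */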
static int gmc_v8_0_convert_vram_type(int mc_seq_vram_type)
{
	switch (mc_seq_vram_type) {
	case MC_SEQ_MISC0__MT__GDDR1:
		return AMDGPU_VRAM_TYPE_GDDR1;
	case MC_SEQ_MISC0__MT__DDR2:
		return AMDGPU_VRAM_TYPE_DDR2;
	case MC_SEQ_MISC0__MT__GDDR3:
		return AMDGPU_VRAM_TYPE_GDDR3;
	case MC_SEQ_MISC0__MT__GDDR4:
		return AMDGPU_VRAM_TYPE_GDDR4;
	case MC_SEQ_MISC0__MT__GDDR5:
		return AMDGPU_VRAM_TYPE_GDDR5;
	case MC_SEQ_MISC0__MT__HBM:
		return AMDGPU_VRAM_TYPE_HBM;
	case MC_SEQ_MISC0__MT__DDR3:
		return AMDGPU_VRAM_TYPE_DDR3;
	default:
		return AMDGPU_VRAM_TYPE_UNKNOWN;
	}
}
static int gmc_v8_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_set_gart_funcs(adev);
	gmc_v8_0_set_irq_funcs(adev);

	return 0;
}
static int gmc_v8_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
}
#define mmMC_SEQ_MISC0_FIJI 0xA71
static int gmc_v8_0_sw_init(void *handle)
{
	int r;
	int dma_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->flags & AMD_IS_APU) {
		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
	} else {
		u32 tmp;

		if (adev->asic_type == CHIP_FIJI)
			tmp = RREG32(mmMC_SEQ_MISC0_FIJI);
		else
			tmp = RREG32(mmMC_SEQ_MISC0);
		tmp &= MC_SEQ_MISC0__MT__MASK;
		adev->mc.vram_type = gmc_v8_0_convert_vram_type(tmp);
	}

	r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, 147, &adev->mc.vm_fault);
	if (r)
		return r;

	/* Adjust VM size here.
	 * Currently set to 4GB ((1 << 20) 4k pages).
	 * Max GPUVM size on VI is 40 bits.
	 */
	adev->vm_manager.max_pfn = amdgpu_vm_size << 18;
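	/*
	 * Note (added for clarity, not in the original file): amdgpu_vm_size
	 * is the VM address space size module parameter, in GB. Shifting left
	 * by 18 converts GB to 4K pages: 1 GB = 2^30 bytes and a page is
	 * 2^12 bytes, so 2^30 / 2^12 = 2^18 pages per GB. With vm_size = 4
	 * this gives max_pfn = 4 << 18 = 1 << 20 pages, i.e. the 4GB the
	 * comment above describes.
	 */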
	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
	 */
	adev->need_dma32 = false;
	dma_bits = adev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		adev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
	}

	r = gmc_v8_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load mc firmware!\n");
		return r;
	}

	r = gmc_v8_0_mc_init(adev);
	if (r)
		return r;

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v8_0_gart_init(adev);
	if (r)
		return r;

	if (!adev->vm_manager.enabled) {
		r = gmc_v8_0_vm_init(adev);
		if (r) {
			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
			return r;
		}
		adev->vm_manager.enabled = true;
	}

	return r;
}
static int gmc_v8_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->vm_manager.enabled) {
		amdgpu_vm_manager_fini(adev);
		gmc_v8_0_vm_fini(adev);
		adev->vm_manager.enabled = false;
	}
	gmc_v8_0_gart_fini(adev);
	amdgpu_gem_force_release(adev);
	amdgpu_bo_fini(adev);

	return 0;
}
static int gmc_v8_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v8_0_init_golden_registers(adev);

	gmc_v8_0_mc_program(adev);

	if (adev->asic_type == CHIP_TONGA) {
		r = gmc_v8_0_mc_load_microcode(adev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	r = gmc_v8_0_gart_enable(adev);
	if (r)
		return r;

	return r;
}
static int gmc_v8_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
	gmc_v8_0_gart_disable(adev);

	return 0;
}
static int gmc_v8_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->vm_manager.enabled) {
		gmc_v8_0_vm_fini(adev);
		adev->vm_manager.enabled = false;
	}
	gmc_v8_0_hw_fini(adev);

	return 0;
}
static int gmc_v8_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v8_0_hw_init(adev);
	if (r)
		return r;

	if (!adev->vm_manager.enabled) {
		r = gmc_v8_0_vm_init(adev);
		if (r) {
			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
			return r;
		}
		adev->vm_manager.enabled = true;
	}

	return r;
}
static bool gmc_v8_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK |
		   SRBM_STATUS__VMC_BUSY_MASK))
		return false;

	return true;
}
static int gmc_v8_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
					       SRBM_STATUS__MCC_BUSY_MASK |
					       SRBM_STATUS__MCD_BUSY_MASK |
					       SRBM_STATUS__VMC_BUSY_MASK |
					       SRBM_STATUS__VMC1_BUSY_MASK);
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}
static void gmc_v8_0_print_status(void *handle)
{
	int i, j;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	dev_info(adev->dev, "GMC 8.x registers\n");
	dev_info(adev->dev, "  SRBM_STATUS=0x%08X\n",
		 RREG32(mmSRBM_STATUS));
	dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
		 RREG32(mmSRBM_STATUS2));

	dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS));
	dev_info(adev->dev, "  MC_VM_MX_L1_TLB_CNTL=0x%08X\n",
		 RREG32(mmMC_VM_MX_L1_TLB_CNTL));
	dev_info(adev->dev, "  VM_L2_CNTL=0x%08X\n",
		 RREG32(mmVM_L2_CNTL));
	dev_info(adev->dev, "  VM_L2_CNTL2=0x%08X\n",
		 RREG32(mmVM_L2_CNTL2));
	dev_info(adev->dev, "  VM_L2_CNTL3=0x%08X\n",
		 RREG32(mmVM_L2_CNTL3));
	dev_info(adev->dev, "  VM_L2_CNTL4=0x%08X\n",
		 RREG32(mmVM_L2_CNTL4));
	dev_info(adev->dev, "  VM_CONTEXT0_PAGE_TABLE_START_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT0_PAGE_TABLE_END_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT0_CNTL2=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_CNTL2));
	dev_info(adev->dev, "  VM_CONTEXT0_CNTL=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_CNTL));
	dev_info(adev->dev, "  VM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR=0x%08X\n",
		 RREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR));
	dev_info(adev->dev, "  VM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR=0x%08X\n",
		 RREG32(mmVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR));
	dev_info(adev->dev, "  VM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET=0x%08X\n",
		 RREG32(mmVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET));
	dev_info(adev->dev, "  VM_CONTEXT1_PAGE_TABLE_START_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT1_PAGE_TABLE_END_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT1_CNTL2=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_CNTL2));
	dev_info(adev->dev, "  VM_CONTEXT1_CNTL=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_CNTL));
	for (i = 0; i < 16; i++) {
		if (i < 8)
			dev_info(adev->dev, "  VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n",
				 i, RREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i));
		else
			dev_info(adev->dev, "  VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n",
				 i, RREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8));
	}
	dev_info(adev->dev, "  MC_VM_SYSTEM_APERTURE_LOW_ADDR=0x%08X\n",
		 RREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR));
	dev_info(adev->dev, "  MC_VM_SYSTEM_APERTURE_HIGH_ADDR=0x%08X\n",
		 RREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR));
	dev_info(adev->dev, "  MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR=0x%08X\n",
		 RREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR));
	dev_info(adev->dev, "  MC_VM_FB_LOCATION=0x%08X\n",
		 RREG32(mmMC_VM_FB_LOCATION));
	dev_info(adev->dev, "  MC_VM_AGP_BASE=0x%08X\n",
		 RREG32(mmMC_VM_AGP_BASE));
	dev_info(adev->dev, "  MC_VM_AGP_TOP=0x%08X\n",
		 RREG32(mmMC_VM_AGP_TOP));
	dev_info(adev->dev, "  MC_VM_AGP_BOT=0x%08X\n",
		 RREG32(mmMC_VM_AGP_BOT));

	dev_info(adev->dev, "  HDP_REG_COHERENCY_FLUSH_CNTL=0x%08X\n",
		 RREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL));
	dev_info(adev->dev, "  HDP_NONSURFACE_BASE=0x%08X\n",
		 RREG32(mmHDP_NONSURFACE_BASE));
	dev_info(adev->dev, "  HDP_NONSURFACE_INFO=0x%08X\n",
		 RREG32(mmHDP_NONSURFACE_INFO));
	dev_info(adev->dev, "  HDP_NONSURFACE_SIZE=0x%08X\n",
		 RREG32(mmHDP_NONSURFACE_SIZE));
	dev_info(adev->dev, "  HDP_MISC_CNTL=0x%08X\n",
		 RREG32(mmHDP_MISC_CNTL));
	dev_info(adev->dev, "  HDP_HOST_PATH_CNTL=0x%08X\n",
		 RREG32(mmHDP_HOST_PATH_CNTL));

	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		dev_info(adev->dev, "  %d:\n", i);
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb05 + j, RREG32(0xb05 + j));
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb06 + j, RREG32(0xb06 + j));
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb07 + j, RREG32(0xb07 + j));
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb08 + j, RREG32(0xb08 + j));
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb09 + j, RREG32(0xb09 + j));
	}

	dev_info(adev->dev, "  BIF_FB_EN=0x%08X\n",
		 RREG32(mmBIF_FB_EN));
}
static int gmc_v8_0_soft_reset(void *handle)
{
	struct amdgpu_mode_mc_save save;
	u32 srbm_soft_reset = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
		if (!(adev->flags & AMD_IS_APU))
			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
	}

	if (srbm_soft_reset) {
		gmc_v8_0_print_status((void *)adev);

		gmc_v8_0_mc_stop(adev, &save);
		if (gmc_v8_0_wait_for_idle(adev)) {
			dev_warn(adev->dev, "Wait for GMC idle timed out !\n");
		}

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);

		gmc_v8_0_mc_resume(adev, &save);
		udelay(50);

		gmc_v8_0_print_status((void *)adev);
	}

	return 0;
}
static int gmc_v8_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	u32 tmp;
	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VM context */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VM context */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	default:
		break;
	}

	return 0;
}
static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u32 addr, status, mc_client;

	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
	mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
	/* reset addr and status */
	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);

	if (!addr && !status)
		return 0;

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
		gmc_v8_0_set_fault_enable_default(adev, false);

	dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
		entry->src_id, entry->src_data);
	dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
		addr);
	dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
		status);
	gmc_v8_0_vm_decode_fault(adev, status, addr, mc_client);

	return 0;
}
static void fiji_update_mc_medium_grain_clock_gating(struct amdgpu_device *adev,
						     bool enable)
{
	uint32_t data;

	if (enable) {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data |= MC_HUB_MISC_HUB_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data |= MC_HUB_MISC_SIP_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data |= MC_HUB_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data |= MC_XPB_CLK_GAT__ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data |= ATC_MISC_CG__ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data |= MC_CITF_MISC_WR_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data |= MC_CITF_MISC_RD_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data |= MC_CITF_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data |= VM_L2_CG__ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	} else {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data &= ~MC_HUB_MISC_HUB_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data &= ~MC_HUB_MISC_SIP_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data &= ~MC_HUB_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data &= ~MC_XPB_CLK_GAT__ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data &= ~ATC_MISC_CG__ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data &= ~MC_CITF_MISC_WR_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data &= ~MC_CITF_MISC_RD_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data &= ~MC_CITF_MISC_VM_CG__ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data &= ~VM_L2_CG__ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	}
}
static void fiji_update_mc_light_sleep(struct amdgpu_device *adev,
				       bool enable)
{
	uint32_t data;

	if (enable) {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data |= MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data |= MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data |= MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data |= MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data |= ATC_MISC_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data |= MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data |= MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data |= MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data |= VM_L2_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	} else {
		data = RREG32(mmMC_HUB_MISC_HUB_CG);
		data &= ~MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_HUB_CG, data);

		data = RREG32(mmMC_HUB_MISC_SIP_CG);
		data &= ~MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_SIP_CG, data);

		data = RREG32(mmMC_HUB_MISC_VM_CG);
		data &= ~MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_HUB_MISC_VM_CG, data);

		data = RREG32(mmMC_XPB_CLK_GAT);
		data &= ~MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_XPB_CLK_GAT, data);

		data = RREG32(mmATC_MISC_CG);
		data &= ~ATC_MISC_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmATC_MISC_CG, data);

		data = RREG32(mmMC_CITF_MISC_WR_CG);
		data &= ~MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_WR_CG, data);

		data = RREG32(mmMC_CITF_MISC_RD_CG);
		data &= ~MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_RD_CG, data);

		data = RREG32(mmMC_CITF_MISC_VM_CG);
		data &= ~MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmMC_CITF_MISC_VM_CG, data);

		data = RREG32(mmVM_L2_CG);
		data &= ~VM_L2_CG__MEM_LS_ENABLE_MASK;
		WREG32(mmVM_L2_CG, data);
	}
}
static int gmc_v8_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	switch (adev->asic_type) {
	case CHIP_FIJI:
		fiji_update_mc_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);
		fiji_update_mc_light_sleep(adev,
				state == AMD_CG_STATE_GATE);
		break;
	default:
		break;
	}
	return 0;
}
static int gmc_v8_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}
const struct amd_ip_funcs gmc_v8_0_ip_funcs = {
	.early_init = gmc_v8_0_early_init,
	.late_init = gmc_v8_0_late_init,
	.sw_init = gmc_v8_0_sw_init,
	.sw_fini = gmc_v8_0_sw_fini,
	.hw_init = gmc_v8_0_hw_init,
	.hw_fini = gmc_v8_0_hw_fini,
	.suspend = gmc_v8_0_suspend,
	.resume = gmc_v8_0_resume,
	.is_idle = gmc_v8_0_is_idle,
	.wait_for_idle = gmc_v8_0_wait_for_idle,
	.soft_reset = gmc_v8_0_soft_reset,
	.print_status = gmc_v8_0_print_status,
	.set_clockgating_state = gmc_v8_0_set_clockgating_state,
	.set_powergating_state = gmc_v8_0_set_powergating_state,
};
static const struct amdgpu_gart_funcs gmc_v8_0_gart_funcs = {
	.flush_gpu_tlb = gmc_v8_0_gart_flush_gpu_tlb,
	.set_pte_pde = gmc_v8_0_gart_set_pte_pde,
};
static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = {
	.set = gmc_v8_0_vm_fault_interrupt_state,
	.process = gmc_v8_0_process_interrupt,
};
static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev)
{
	if (adev->gart.gart_funcs == NULL)
		adev->gart.gart_funcs = &gmc_v8_0_gart_funcs;
}
static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->mc.vm_fault.num_types = 1;
	adev->mc.vm_fault.funcs = &gmc_v8_0_irq_funcs;
}