/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include "drm/drmP.h"
#include "amdgpu.h"
#include "cikd.h"
#include "cik.h"
#include "gmc_v7_0.h"
#include "amdgpu_ucode.h"

#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev);
static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);

MODULE_FIRMWARE("radeon/bonaire_mc.bin");
MODULE_FIRMWARE("radeon/hawaii_mc.bin");
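/*
 * The MC (memory controller) firmware only exists for the discrete CIK
 * parts (Bonaire, Hawaii); the APUs carry no MC ucode, which is why
 * gmc_v7_0_hw_init() below skips gmc_v7_0_mc_load_microcode() when
 * AMD_IS_APU is set.
 */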
/**
 * gmc_v7_0_mc_wait_for_idle - wait for MC idle callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Wait for the MC (memory controller) to be idle.
 * Returns 0 if the MC is idle, -1 if not.
 */
int gmc_v7_0_mc_wait_for_idle(struct amdgpu_device *adev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(mmSRBM_STATUS) & 0x1F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}
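/*
 * The 0x1F00 mask above covers bits 8-12 of SRBM_STATUS; judging by
 * gmc_v7_0_is_idle() below, these are presumably the VMC/MCB/MCC/MCD
 * busy bits, so the loop returns as soon as every MC client is idle.
 */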
void gmc_v7_0_mc_stop(struct amdgpu_device *adev,
		      struct amdgpu_mode_mc_save *save)
{
	u32 blackout;

	if (adev->mode_info.num_crtc)
		amdgpu_display_stop_mc_access(adev, save);

	amdgpu_asic_wait_for_mc_idle(adev);

	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
		/* Block CPU access */
		WREG32(mmBIF_FB_EN, 0);
		/* blackout the MC */
		blackout = REG_SET_FIELD(blackout,
					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
	}
	/* wait for the MC to settle */
	udelay(100);
}
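/*
 * mc_stop/mc_resume bracket MC reprogramming: CPU framebuffer access is
 * cut off via BIF_FB_EN and the MC is put into blackout so that no
 * client observes the aperture registers while they are being rewritten.
 */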
void gmc_v7_0_mc_resume(struct amdgpu_device *adev,
			struct amdgpu_mode_mc_save *save)
{
	u32 tmp;

	/* unblackout the MC */
	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
	/* allow CPU access */
	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
	WREG32(mmBIF_FB_EN, tmp);

	if (adev->mode_info.num_crtc)
		amdgpu_display_resume_mc_access(adev, save);
}
/**
 * gmc_v7_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		chip_name = "bonaire";
		break;
	case CHIP_HAWAII:
		chip_name = "hawaii";
		break;
	case CHIP_KAVERI:
	case CHIP_KABINI:
		return 0;
	default:
		BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
	err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->mc.fw);

out:
	if (err) {
		printk(KERN_ERR
		       "cik_mc: Failed to load firmware \"%s\"\n",
		       fw_name);
		release_firmware(adev->mc.fw);
		adev->mc.fw = NULL;
	}
	return err;
}
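/*
 * The firmware name expands to e.g. "radeon/bonaire_mc.bin", which
 * request_firmware() resolves against the system firmware path
 * (typically /lib/firmware); the "radeon/" prefix matches the files
 * shipped for the older radeon driver, presumably kept so both drivers
 * can share the same binaries.
 */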
/**
 * gmc_v7_0_mc_load_microcode - load MC ucode into the hw
 *
 * @adev: amdgpu_device pointer
 *
 * Load the GDDR MC ucode into the hw (CIK).
 * Returns 0 on success, error on failure.
 */
static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 running, blackout = 0;
	int i, ucode_size, regs_size;

	if (!adev->mc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);

	if (running == 0) {
		/* save and enable MC blackout while the ucode is replaced */
		blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);

		/* reset the engine and set to writable */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
		}
		/* load the MC ucode */
		for (i = 0; i < ucode_size; i++)
			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
				break;
			udelay(1);
		}
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
				break;
			udelay(1);
		}

		/* restore the saved blackout state */
		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
	}

	return 0;
}
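/*
 * The MC_SEQ_SUP_CNTL values written above (0x8, 0x10, 0x4, 0x1) are
 * undocumented sequencer control bits; going by the inline comments,
 * 0x8/0x10 hold the engine in reset with its program memory writable,
 * and 0x4 followed by 0x1 (the RUN field checked earlier) restarts it.
 */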
static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_mc *mc)
{
	if (mc->mc_vram_size > 0xFFC0000000ULL) {
		/* leave room for at least 1024M GTT */
		dev_warn(adev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xFFC0000000ULL;
		mc->mc_vram_size = 0xFFC0000000ULL;
	}
	amdgpu_vram_location(adev, &adev->mc, 0);
	adev->mc.gtt_base_align = 0;
	amdgpu_gtt_location(adev, mc);
}
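/*
 * 0xFFC0000000 is (1ULL << 40) - (1ULL << 30), i.e. 1 TiB minus 1 GiB:
 * with a 40-bit MC address space (see mc_mask in gmc_v7_0_sw_init()),
 * capping VRAM at this value leaves at least the advertised 1024M of
 * room for the GTT.
 */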
/**
 * gmc_v7_0_mc_program - program the GPU memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Set the location of vram, gart, and AGP in the GPU's
 * physical address space (CIK).
 */
static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
{
	struct amdgpu_mode_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		WREG32((0xb05 + j), 0x00000000);
		WREG32((0xb06 + j), 0x00000000);
		WREG32((0xb07 + j), 0x00000000);
		WREG32((0xb08 + j), 0x00000000);
		WREG32((0xb09 + j), 0x00000000);
	}
	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);

	if (adev->mode_info.num_crtc)
		amdgpu_display_set_vga_render_state(adev, false);

	gmc_v7_0_mc_stop(adev, &save);
	if (amdgpu_asic_wait_for_mc_idle(adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	}
	/* Update configuration */
	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       adev->mc.vram_start >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       adev->mc.vram_end >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       adev->vram_scratch.gpu_addr >> 12);
	tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(mmMC_VM_FB_LOCATION, tmp);
	/* XXX double check these! */
	WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
	WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
	WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	WREG32(mmMC_VM_AGP_BASE, 0);
	WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
	if (amdgpu_asic_wait_for_mc_idle(adev)) {
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");
	}
	gmc_v7_0_mc_resume(adev, &save);

	WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);

	tmp = RREG32(mmHDP_MISC_CNTL);
	tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
	WREG32(mmHDP_MISC_CNTL, tmp);

	tmp = RREG32(mmHDP_HOST_PATH_CNTL);
	WREG32(mmHDP_HOST_PATH_CNTL, tmp);
}
/**
 * gmc_v7_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space (CIK).
 * Returns 0 for success.
 */
static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
{
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM information */
	tmp = RREG32(mmMC_ARB_RAMCFG);
	if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	tmp = RREG32(mmMC_SHARED_CHMAP);
	switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
	case 0:
	default:
		numchan = 1;
		break;
	case 1:
		numchan = 2;
		break;
	case 2:
		numchan = 4;
		break;
	case 3:
		numchan = 8;
		break;
	case 4:
		numchan = 3;
		break;
	case 5:
		numchan = 6;
		break;
	case 6:
		numchan = 10;
		break;
	case 7:
		numchan = 12;
		break;
	case 8:
		numchan = 16;
		break;
	}
	adev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0 ? */
	adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
	/* size in MB */
	adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->mc.visible_vram_size = adev->mc.aper_size;

	/* unless the user had overridden it, set the gart
	 * size equal to 1024 MB or the vram size, whichever is larger.
	 */
	if (amdgpu_gart_size == -1)
		adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size);
	else
		adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;

	gmc_v7_0_vram_gtt_location(adev, &adev->mc);

	return 0;
}
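/*
 * Example: on a 2 GiB Bonaire with no override, gtt_size becomes
 * max(1024ULL << 20, 2ULL << 30) = 2 GiB. amdgpu_gart_size is given in
 * MiB, hence the << 20 when the user does override it.
 */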
/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v7_0_gart_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 *
 * Flush the TLB for the requested page table (CIK).
 */
static void gmc_v7_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
					uint32_t vmid)
{
	/* flush hdp cache */
	WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);

	/* bits 0-15 are the VM contexts0-15 */
	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}
/**
 * gmc_v7_0_gart_set_pte_pde - update the page tables using MMIO
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update the page tables using the CPU.
 */
static int gmc_v7_0_gart_set_pte_pde(struct amdgpu_device *adev,
				     void *cpu_pt_addr,
				     uint32_t gpu_page_idx,
				     uint64_t addr,
				     uint32_t flags)
{
	void __iomem *ptr = (void *)cpu_pt_addr;
	uint64_t value;

	value = addr & 0xFFFFFFFFFFFFF000ULL;
	value |= flags;
	writeq(value, ptr + (gpu_page_idx * 8));

	return 0;
}
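/*
 * Worked example: a 64-bit GART PTE packs a 4 KiB-aligned page address
 * with low-bit access flags. For addr = 0x123456789000, the entry
 * written is (0x123456789000 & ~0xFFFULL) | flags, at byte offset
 * gpu_page_idx * 8 within the page table.
 */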
/**
 * gmc_v7_0_set_fault_enable_default - update VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void gmc_v7_0_set_fault_enable_default(struct amdgpu_device *adev,
					      bool value)
{
	u32 tmp;

	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
}
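/*
 * All six REG_SET_FIELD() calls above toggle the same policy: with
 * value == true a faulting access is redirected to the default (dummy)
 * page and the engine keeps running; value == false presumably leaves
 * the fault to stall the offending client instead, which is why
 * AMDGPU_VM_FAULT_STOP_ALWAYS passes false in gmc_v7_0_gart_enable().
 */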
/**
 * gmc_v7_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 *
 * This sets up the TLBs, programs the page tables for VMID0,
 * sets up the hw for VMIDs 1-15 which are allocated on
 * demand, and sets up the global locations for the LDS, GDS,
 * and GPUVM for FSA64 clients (CIK).
 * Returns 0 for success, errors for failure.
 */
static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
{
	int r, i;
	u32 tmp;

	if (adev->gart.robj == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = amdgpu_gart_table_vram_pin(adev);
	if (r)
		return r;
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	WREG32(mmVM_L2_CNTL, tmp);
	tmp = REG_SET_FIELD(0, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32(mmVM_L2_CNTL2, tmp);
	tmp = RREG32(mmVM_L2_CNTL3);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 4);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 4);
	WREG32(mmVM_L2_CNTL3, tmp);
	/* setup context0 */
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, (adev->mc.gtt_end >> 12) - 1);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page.addr >> 12));
	WREG32(mmVM_CONTEXT0_CNTL2, 0);
	tmp = RREG32(mmVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	WREG32(mmVM_CONTEXT0_CNTL, tmp);

	/* empty context1-15 */
	/* FIXME start with 4G, once using 2 level pt switch to full
	 * vm size space
	 */
	/* set vm size, must be a multiple of 4 */
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
	for (i = 1; i < 16; i++) {
		if (i < 8)
			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
			       adev->gart.table_addr >> 12);
		else
			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
			       adev->gart.table_addr >> 12);
	}

	/* enable context1-15 */
	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page.addr >> 12));
	WREG32(mmVM_CONTEXT1_CNTL2, 4);
	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
			    amdgpu_vm_block_size - 9);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		gmc_v7_0_set_fault_enable_default(adev, false);
	else
		gmc_v7_0_set_fault_enable_default(adev, true);

	if (adev->asic_type == CHIP_KAVERI) {
		tmp = RREG32(mmCHUB_CONTROL);
		tmp &= ~BYPASS_VM;
		WREG32(mmCHUB_CONTROL, tmp);
	}

	gmc_v7_0_gart_flush_gpu_tlb(adev, 0);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(adev->mc.gtt_size >> 20),
		 (unsigned long long)adev->gart.table_addr);
	adev->gart.ready = true;
	return 0;
}
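/*
 * Note on the block size math above: VM_CONTEXT1_CNTL.PAGE_TABLE_BLOCK_SIZE
 * appears to be encoded relative to a 512-entry (2^9) base, so the
 * driver-level amdgpu_vm_block_size (log2 of 4 KiB pages per block) has
 * 9 subtracted before being programmed.
 */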
static int gmc_v7_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.robj) {
		WARN(1, "R600 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	return amdgpu_gart_table_vram_alloc(adev);
}
/**
 * gmc_v7_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables (CIK).
 */
static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
{
	u32 tmp;

	/* Disable all tables */
	WREG32(mmVM_CONTEXT0_CNTL, 0);
	WREG32(mmVM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32(mmVM_L2_CNTL, tmp);
	WREG32(mmVM_L2_CNTL2, 0);
	amdgpu_gart_table_vram_unpin(adev);
}
/**
 * gmc_v7_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup (CIK).
 */
static void gmc_v7_0_gart_fini(struct amdgpu_device *adev)
{
	amdgpu_gart_table_vram_free(adev);
	amdgpu_gart_fini(adev);
}
/*
 * vm
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v7_0_vm_init - cik vm init callback
 *
 * @adev: amdgpu_device pointer
 *
 * Inits cik specific vm parameters (number of VMs, base of vram for
 * VMIDs 1-15) (CIK).
 * Returns 0 for success.
 */
static int gmc_v7_0_vm_init(struct amdgpu_device *adev)
{
	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.nvm = AMDGPU_NUM_OF_VMIDS;

	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU) {
		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);

		tmp <<= 22;
		adev->vm_manager.vram_base_offset = tmp;
	} else {
		adev->vm_manager.vram_base_offset = 0;
	}

	return 0;
}
/**
 * gmc_v7_0_vm_fini - cik vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down any asic specific VM setup (CIK).
 */
static void gmc_v7_0_vm_fini(struct amdgpu_device *adev)
{
}
/**
 * gmc_v7_0_vm_decode_fault - print human readable fault info
 *
 * @adev: amdgpu_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
 *
 * Print human readable fault information (CIK).
 */
static void gmc_v7_0_vm_decode_fault(struct amdgpu_device *adev,
				     u32 status, u32 addr, u32 mc_client)
{
	u32 mc_id;
	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);
	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };

	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_ID);

	printk("VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
	       protections, vmid, addr,
	       REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			     MEMORY_CLIENT_RW) ?
	       "write" : "read", block, mc_client, mc_id);
}
static const u32 mc_cg_registers[] = {
	mmMC_HUB_MISC_HUB_CG,
	mmMC_HUB_MISC_SIP_CG,
	mmMC_HUB_MISC_VM_CG,
	mmMC_XPB_CLK_GAT,
	mmATC_MISC_CG,
	mmMC_CITF_MISC_WR_CG,
	mmMC_CITF_MISC_RD_CG,
	mmMC_CITF_MISC_VM_CG,
	mmVM_L2_CG,
};

static const u32 mc_cg_ls_en[] = {
	MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK,
	MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK,
	MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK,
	MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK,
	ATC_MISC_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK,
	VM_L2_CG__MEM_LS_ENABLE_MASK,
};

static const u32 mc_cg_en[] = {
	MC_HUB_MISC_HUB_CG__ENABLE_MASK,
	MC_HUB_MISC_SIP_CG__ENABLE_MASK,
	MC_HUB_MISC_VM_CG__ENABLE_MASK,
	MC_XPB_CLK_GAT__ENABLE_MASK,
	ATC_MISC_CG__ENABLE_MASK,
	MC_CITF_MISC_WR_CG__ENABLE_MASK,
	MC_CITF_MISC_RD_CG__ENABLE_MASK,
	MC_CITF_MISC_VM_CG__ENABLE_MASK,
	VM_L2_CG__ENABLE_MASK,
};
static void gmc_v7_0_enable_mc_ls(struct amdgpu_device *adev,
				  bool enable)
{
	int i;
	u32 orig, data;

	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
		orig = data = RREG32(mc_cg_registers[i]);
		if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_LS))
			data |= mc_cg_ls_en[i];
		else
			data &= ~mc_cg_ls_en[i];
		if (data != orig)
			WREG32(mc_cg_registers[i], data);
	}
}
static void gmc_v7_0_enable_mc_mgcg(struct amdgpu_device *adev,
				    bool enable)
{
	int i;
	u32 orig, data;

	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
		orig = data = RREG32(mc_cg_registers[i]);
		if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_MGCG))
			data |= mc_cg_en[i];
		else
			data &= ~mc_cg_en[i];
		if (data != orig)
			WREG32(mc_cg_registers[i], data);
	}
}
static void gmc_v7_0_enable_bif_mgls(struct amdgpu_device *adev,
				     bool enable)
{
	u32 orig, data;

	orig = data = RREG32_PCIE(ixPCIE_CNTL2);

	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_BIF_LS)) {
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 1);
	} else {
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 0);
	}

	if (orig != data)
		WREG32_PCIE(ixPCIE_CNTL2, data);
}
static void gmc_v7_0_enable_hdp_mgcg(struct amdgpu_device *adev,
				     bool enable)
{
	u32 orig, data;

	orig = data = RREG32(mmHDP_HOST_PATH_CNTL);

	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_MGCG))
		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
	else
		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);

	if (orig != data)
		WREG32(mmHDP_HOST_PATH_CNTL, data);
}
static void gmc_v7_0_enable_hdp_ls(struct amdgpu_device *adev,
				   bool enable)
{
	u32 orig, data;

	orig = data = RREG32(mmHDP_MEM_POWER_LS);

	if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_LS))
		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
	else
		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);

	if (orig != data)
		WREG32(mmHDP_MEM_POWER_LS, data);
}
static int gmc_v7_0_convert_vram_type(int mc_seq_vram_type)
{
	switch (mc_seq_vram_type) {
	case MC_SEQ_MISC0__MT__GDDR1:
		return AMDGPU_VRAM_TYPE_GDDR1;
	case MC_SEQ_MISC0__MT__DDR2:
		return AMDGPU_VRAM_TYPE_DDR2;
	case MC_SEQ_MISC0__MT__GDDR3:
		return AMDGPU_VRAM_TYPE_GDDR3;
	case MC_SEQ_MISC0__MT__GDDR4:
		return AMDGPU_VRAM_TYPE_GDDR4;
	case MC_SEQ_MISC0__MT__GDDR5:
		return AMDGPU_VRAM_TYPE_GDDR5;
	case MC_SEQ_MISC0__MT__HBM:
		return AMDGPU_VRAM_TYPE_HBM;
	case MC_SEQ_MISC0__MT__DDR3:
		return AMDGPU_VRAM_TYPE_DDR3;
	default:
		return AMDGPU_VRAM_TYPE_UNKNOWN;
	}
}
static int gmc_v7_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v7_0_set_gart_funcs(adev);
	gmc_v7_0_set_irq_funcs(adev);

	if (adev->flags & AMD_IS_APU) {
		adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
	} else {
		u32 tmp = RREG32(mmMC_SEQ_MISC0);

		tmp &= MC_SEQ_MISC0__MT__MASK;
		adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp);
	}

	return 0;
}
static int gmc_v7_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
}
static int gmc_v7_0_sw_init(void *handle)
{
	int r;
	int dma_bits;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_gem_init(adev);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, 147, &adev->mc.vm_fault);
	if (r)
		return r;

	/* Adjust VM size here.
	 * Currently set to 4GB ((1 << 20) 4k pages).
	 * Max GPUVM size is 40 bits.
	 */
	adev->vm_manager.max_pfn = amdgpu_vm_size << 18;
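	/*
	 * amdgpu_vm_size is in GiB; one GiB is 2^18 pages of 4 KiB
	 * (2^30 / 2^12), so the default of 4 gives
	 * max_pfn = 4 << 18 = 1 << 20 pages = 4 GiB of GPUVM space,
	 * matching the comment above.
	 */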
	/* Set the internal MC address mask.
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
	 */
	adev->need_dma32 = false;
	dma_bits = adev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		adev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
	}

	r = gmc_v7_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load mc firmware!\n");
		return r;
	}

	r = gmc_v7_0_mc_init(adev);
	if (r)
		return r;

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v7_0_gart_init(adev);
	if (r)
		return r;

	if (!adev->vm_manager.enabled) {
		r = gmc_v7_0_vm_init(adev);
		if (r) {
			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
			return r;
		}
		adev->vm_manager.enabled = true;
	}

	return r;
}
)
964 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
966 if (adev
->vm_manager
.enabled
) {
967 for (i
= 0; i
< AMDGPU_NUM_VM
; ++i
)
968 fence_put(adev
->vm_manager
.active
[i
]);
969 gmc_v7_0_vm_fini(adev
);
970 adev
->vm_manager
.enabled
= false;
972 gmc_v7_0_gart_fini(adev
);
973 amdgpu_gem_fini(adev
);
974 amdgpu_bo_fini(adev
);
static int gmc_v7_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v7_0_mc_program(adev);

	if (!(adev->flags & AMD_IS_APU)) {
		r = gmc_v7_0_mc_load_microcode(adev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	r = gmc_v7_0_gart_enable(adev);
	if (r)
		return r;

	return r;
}
static int gmc_v7_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
	gmc_v7_0_gart_disable(adev);

	return 0;
}
static int gmc_v7_0_suspend(void *handle)
{
	int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->vm_manager.enabled) {
		for (i = 0; i < AMDGPU_NUM_VM; ++i)
			fence_put(adev->vm_manager.active[i]);
		gmc_v7_0_vm_fini(adev);
		adev->vm_manager.enabled = false;
	}
	gmc_v7_0_hw_fini(adev);

	return 0;
}
static int gmc_v7_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v7_0_hw_init(adev);
	if (r)
		return r;

	if (!adev->vm_manager.enabled) {
		r = gmc_v7_0_vm_init(adev);
		if (r) {
			dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
			return r;
		}
		adev->vm_manager.enabled = true;
	}

	return r;
}
static bool gmc_v7_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK |
		   SRBM_STATUS__VMC_BUSY_MASK))
		return false;

	return true;
}
static int gmc_v7_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
					       SRBM_STATUS__MCC_BUSY_MASK |
					       SRBM_STATUS__MCD_BUSY_MASK |
					       SRBM_STATUS__VMC_BUSY_MASK);
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}
static void gmc_v7_0_print_status(void *handle)
{
	int i, j;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	dev_info(adev->dev, "GMC 7.x registers\n");
	dev_info(adev->dev, "  SRBM_STATUS=0x%08X\n",
		 RREG32(mmSRBM_STATUS));
	dev_info(adev->dev, "  SRBM_STATUS2=0x%08X\n",
		 RREG32(mmSRBM_STATUS2));

	dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS));
	dev_info(adev->dev, "  MC_VM_MX_L1_TLB_CNTL=0x%08X\n",
		 RREG32(mmMC_VM_MX_L1_TLB_CNTL));
	dev_info(adev->dev, "  VM_L2_CNTL=0x%08X\n",
		 RREG32(mmVM_L2_CNTL));
	dev_info(adev->dev, "  VM_L2_CNTL2=0x%08X\n",
		 RREG32(mmVM_L2_CNTL2));
	dev_info(adev->dev, "  VM_L2_CNTL3=0x%08X\n",
		 RREG32(mmVM_L2_CNTL3));
	dev_info(adev->dev, "  VM_CONTEXT0_PAGE_TABLE_START_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT0_PAGE_TABLE_END_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT0_CNTL2=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_CNTL2));
	dev_info(adev->dev, "  VM_CONTEXT0_CNTL=0x%08X\n",
		 RREG32(mmVM_CONTEXT0_CNTL));
	/* 0x15D4-0x15DC are byte addresses of unnamed MC registers,
	 * i.e. dword indices 0x575-0x577 */
	dev_info(adev->dev, "  0x15D4=0x%08X\n",
		 RREG32(0x575));
	dev_info(adev->dev, "  0x15D8=0x%08X\n",
		 RREG32(0x576));
	dev_info(adev->dev, "  0x15DC=0x%08X\n",
		 RREG32(0x577));
	dev_info(adev->dev, "  VM_CONTEXT1_PAGE_TABLE_START_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT1_PAGE_TABLE_END_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR));
	dev_info(adev->dev, "  VM_CONTEXT1_CNTL2=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_CNTL2));
	dev_info(adev->dev, "  VM_CONTEXT1_CNTL=0x%08X\n",
		 RREG32(mmVM_CONTEXT1_CNTL));
	for (i = 0; i < 16; i++) {
		if (i < 8)
			dev_info(adev->dev, "  VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n",
				 i, RREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i));
		else
			dev_info(adev->dev, "  VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n",
				 i, RREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8));
	}
	dev_info(adev->dev, "  MC_VM_SYSTEM_APERTURE_LOW_ADDR=0x%08X\n",
		 RREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR));
	dev_info(adev->dev, "  MC_VM_SYSTEM_APERTURE_HIGH_ADDR=0x%08X\n",
		 RREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR));
	dev_info(adev->dev, "  MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR=0x%08X\n",
		 RREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR));
	dev_info(adev->dev, "  MC_VM_FB_LOCATION=0x%08X\n",
		 RREG32(mmMC_VM_FB_LOCATION));
	dev_info(adev->dev, "  MC_VM_AGP_BASE=0x%08X\n",
		 RREG32(mmMC_VM_AGP_BASE));
	dev_info(adev->dev, "  MC_VM_AGP_TOP=0x%08X\n",
		 RREG32(mmMC_VM_AGP_TOP));
	dev_info(adev->dev, "  MC_VM_AGP_BOT=0x%08X\n",
		 RREG32(mmMC_VM_AGP_BOT));

	if (adev->asic_type == CHIP_KAVERI) {
		dev_info(adev->dev, "  CHUB_CONTROL=0x%08X\n",
			 RREG32(mmCHUB_CONTROL));
	}

	dev_info(adev->dev, "  HDP_REG_COHERENCY_FLUSH_CNTL=0x%08X\n",
		 RREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL));
	dev_info(adev->dev, "  HDP_NONSURFACE_BASE=0x%08X\n",
		 RREG32(mmHDP_NONSURFACE_BASE));
	dev_info(adev->dev, "  HDP_NONSURFACE_INFO=0x%08X\n",
		 RREG32(mmHDP_NONSURFACE_INFO));
	dev_info(adev->dev, "  HDP_NONSURFACE_SIZE=0x%08X\n",
		 RREG32(mmHDP_NONSURFACE_SIZE));
	dev_info(adev->dev, "  HDP_MISC_CNTL=0x%08X\n",
		 RREG32(mmHDP_MISC_CNTL));
	dev_info(adev->dev, "  HDP_HOST_PATH_CNTL=0x%08X\n",
		 RREG32(mmHDP_HOST_PATH_CNTL));

	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		dev_info(adev->dev, "  %d:\n", i);
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb05 + j, RREG32(0xb05 + j));
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb06 + j, RREG32(0xb06 + j));
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb07 + j, RREG32(0xb07 + j));
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb08 + j, RREG32(0xb08 + j));
		dev_info(adev->dev, "  0x%04X=0x%08X\n",
			 0xb09 + j, RREG32(0xb09 + j));
	}

	dev_info(adev->dev, "  BIF_FB_EN=0x%08X\n",
		 RREG32(mmBIF_FB_EN));
}
static int gmc_v7_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_mode_mc_save save;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
		if (!(adev->flags & AMD_IS_APU))
			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
	}

	if (srbm_soft_reset) {
		gmc_v7_0_print_status((void *)adev);

		gmc_v7_0_mc_stop(adev, &save);
		if (gmc_v7_0_wait_for_idle(adev)) {
			dev_warn(adev->dev, "Wait for GMC idle timed out!\n");
		}

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);

		gmc_v7_0_mc_resume(adev, &save);
		udelay(50);

		gmc_v7_0_print_status((void *)adev);
	}

	return 0;
}
static int gmc_v7_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	u32 tmp;
	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	default:
		break;
	}

	return 0;
}
static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u32 addr, status, mc_client;

	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
	mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
	/* reset addr and status */
	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);

	if (!addr && !status)
		return 0;

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
		gmc_v7_0_set_fault_enable_default(adev, false);

	dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
		entry->src_id, entry->src_data);
	dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
		addr);
	dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
		status);
	gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client);

	return 0;
}
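/*
 * WREG32_P(reg, val, mask) is a read-modify-write helper: writing 1
 * with mask ~1 touches only bit 0 of VM_CONTEXT1_CNTL2, which (per the
 * "reset addr and status" comment above) clears the latched fault
 * address and status so the next fault can be captured.
 */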
static int gmc_v7_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	bool gate = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_CG_STATE_GATE)
		gate = true;

	if (!(adev->flags & AMD_IS_APU)) {
		gmc_v7_0_enable_mc_mgcg(adev, gate);
		gmc_v7_0_enable_mc_ls(adev, gate);
	}
	gmc_v7_0_enable_bif_mgls(adev, gate);
	gmc_v7_0_enable_hdp_mgcg(adev, gate);
	gmc_v7_0_enable_hdp_ls(adev, gate);

	return 0;
}
static int gmc_v7_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}
const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
	.early_init = gmc_v7_0_early_init,
	.late_init = gmc_v7_0_late_init,
	.sw_init = gmc_v7_0_sw_init,
	.sw_fini = gmc_v7_0_sw_fini,
	.hw_init = gmc_v7_0_hw_init,
	.hw_fini = gmc_v7_0_hw_fini,
	.suspend = gmc_v7_0_suspend,
	.resume = gmc_v7_0_resume,
	.is_idle = gmc_v7_0_is_idle,
	.wait_for_idle = gmc_v7_0_wait_for_idle,
	.soft_reset = gmc_v7_0_soft_reset,
	.print_status = gmc_v7_0_print_status,
	.set_clockgating_state = gmc_v7_0_set_clockgating_state,
	.set_powergating_state = gmc_v7_0_set_powergating_state,
};

static const struct amdgpu_gart_funcs gmc_v7_0_gart_funcs = {
	.flush_gpu_tlb = gmc_v7_0_gart_flush_gpu_tlb,
	.set_pte_pde = gmc_v7_0_gart_set_pte_pde,
};

static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {
	.set = gmc_v7_0_vm_fault_interrupt_state,
	.process = gmc_v7_0_process_interrupt,
};

static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev)
{
	if (adev->gart.gart_funcs == NULL)
		adev->gart.gart_funcs = &gmc_v7_0_gart_funcs;
}

static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->mc.vm_fault.num_types = 1;
	adev->mc.vm_fault.funcs = &gmc_v7_0_irq_funcs;
}