/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>

#include "amdgpu_ucode.h"

#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev);
static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
43 MODULE_FIRMWARE("radeon/boniare_mc.bin");
44 MODULE_FIRMWARE("radeon/hawaii_mc.bin");
/**
 * gmc_v7_0_mc_wait_for_idle - wait for MC idle callback.
 *
 * @adev: amdgpu_device pointer
 *
 * Wait for the MC (memory controller) to be idle.
 * Returns 0 if the MC is idle, -1 if not.
 */
int gmc_v7_0_mc_wait_for_idle(struct amdgpu_device *adev)
{
        unsigned i;
        u32 tmp;

        for (i = 0; i < adev->usec_timeout; i++) {
                /* read MC_STATUS */
                tmp = RREG32(mmSRBM_STATUS) & 0x1F00;
                if (!tmp)
                        return 0;
                udelay(1);
        }
        return -1;
}
void gmc_v7_0_mc_stop(struct amdgpu_device *adev,
                      struct amdgpu_mode_mc_save *save)
{
        u32 blackout;

        if (adev->mode_info.num_crtc)
                amdgpu_display_stop_mc_access(adev, save);

        amdgpu_asic_wait_for_mc_idle(adev);

        blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
        if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
                /* Block CPU access */
                WREG32(mmBIF_FB_EN, 0);
                /* blackout the MC */
                blackout = REG_SET_FIELD(blackout,
                                         MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
                WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
        }
        /* wait for the MC to settle */
        udelay(100);
}
void gmc_v7_0_mc_resume(struct amdgpu_device *adev,
                        struct amdgpu_mode_mc_save *save)
{
        u32 tmp;

        /* unblackout the MC */
        tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
        tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
        WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
        /* allow CPU access */
        tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
        tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
        WREG32(mmBIF_FB_EN, tmp);

        if (adev->mode_info.num_crtc)
                amdgpu_display_resume_mc_access(adev, save);
}
/**
 * gmc_v7_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
{
        const char *chip_name;
        char fw_name[30];
        int err;

        DRM_DEBUG("\n");

        switch (adev->asic_type) {
        case CHIP_BONAIRE:
                chip_name = "bonaire";
                break;
        case CHIP_HAWAII:
                chip_name = "hawaii";
                break;
        case CHIP_KAVERI:
        case CHIP_KABINI:
                return 0;
        default: BUG();
        }

        snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
        err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
        if (err)
                goto out;
        err = amdgpu_ucode_validate(adev->mc.fw);

out:
        if (err) {
                printk(KERN_ERR
                       "cik_mc: Failed to load firmware \"%s\"\n",
                       fw_name);
                release_firmware(adev->mc.fw);
                adev->mc.fw = NULL;
        }
        return err;
}
/**
 * gmc_v7_0_mc_load_microcode - load MC ucode into the hw
 *
 * @adev: amdgpu_device pointer
 *
 * Load the GDDR MC ucode into the hw (CIK).
 * Returns 0 on success, error on failure.
 */
static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev)
{
        const struct mc_firmware_header_v1_0 *hdr;
        const __le32 *fw_data = NULL;
        const __le32 *io_mc_regs = NULL;
        u32 running, blackout = 0;
        int i, ucode_size, regs_size;

        if (!adev->mc.fw)
                return -EINVAL;

        hdr = (const struct mc_firmware_header_v1_0 *)adev->mc.fw->data;
        amdgpu_ucode_print_mc_hdr(&hdr->header);

        adev->mc.fw_version = le32_to_cpu(hdr->header.ucode_version);
        regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
        io_mc_regs = (const __le32 *)
                (adev->mc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
        ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
        fw_data = (const __le32 *)
                (adev->mc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
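        /*
         * Firmware layout note, as implied by the header fields used above:
         * the io_debug array is a sequence of 32-bit (register index, data)
         * pairs, so regs_size divides io_debug_size_bytes by 4 bytes * 2
         * words, while the ucode payload is plain 32-bit words (divide by 4).
         */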
        running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);

        if (running == 0) {
                if (running) {
                        blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
                        WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
                }

                /* reset the engine and set to writable */
                WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
                WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

                /* load mc io regs */
                for (i = 0; i < regs_size; i++) {
                        WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
                        WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
                }
                /* load the MC ucode */
                for (i = 0; i < ucode_size; i++)
                        WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

                /* put the engine back into the active state */
                WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
                WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
                WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

                /* wait for training to complete */
                for (i = 0; i < adev->usec_timeout; i++) {
                        if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
                                          MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
                                break;
                        udelay(1);
                }
                for (i = 0; i < adev->usec_timeout; i++) {
                        if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
                                          MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
                                break;
                        udelay(1);
                }

                if (running)
                        WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout);
        }

        return 0;
}
static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
                                       struct amdgpu_mc *mc)
{
        if (mc->mc_vram_size > 0xFFC0000000ULL) {
                /* leave room for at least 1024M GTT */
                dev_warn(adev->dev, "limiting VRAM\n");
                mc->real_vram_size = 0xFFC0000000ULL;
                mc->mc_vram_size = 0xFFC0000000ULL;
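                /*
                 * 0xFFC0000000 is 2^40 - 2^30: the full 40-bit MC address
                 * space less 1 GB, which is what "leave room for at least
                 * 1024M GTT" works out to.
                 */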
        }
        amdgpu_vram_location(adev, &adev->mc, 0);
        adev->mc.gtt_base_align = 0;
        amdgpu_gtt_location(adev, mc);
}
/**
 * gmc_v7_0_mc_program - program the GPU memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Set the location of vram, gart, and AGP in the GPU's
 * physical address space (CIK).
 */
static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
{
        struct amdgpu_mode_mc_save save;
        u32 tmp;
        int i, j;

        /* Initialize HDP */
        for (i = 0, j = 0; i < 32; i++, j += 0x6) {
                WREG32((0xb05 + j), 0x00000000);
                WREG32((0xb06 + j), 0x00000000);
                WREG32((0xb07 + j), 0x00000000);
                WREG32((0xb08 + j), 0x00000000);
                WREG32((0xb09 + j), 0x00000000);
        }
        WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);

        if (adev->mode_info.num_crtc)
                amdgpu_display_set_vga_render_state(adev, false);

        gmc_v7_0_mc_stop(adev, &save);
        if (amdgpu_asic_wait_for_mc_idle(adev)) {
                dev_warn(adev->dev, "Wait for MC idle timed out!\n");
        }
        /* Update configuration */
        WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
               adev->mc.vram_start >> 12);
        WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
               adev->mc.vram_end >> 12);
        WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
               adev->vram_scratch.gpu_addr >> 12);
        tmp = ((adev->mc.vram_end >> 24) & 0xFFFF) << 16;
        tmp |= ((adev->mc.vram_start >> 24) & 0xFFFF);
        WREG32(mmMC_VM_FB_LOCATION, tmp);
        /* XXX double check these! */
        WREG32(mmHDP_NONSURFACE_BASE, (adev->mc.vram_start >> 8));
        WREG32(mmHDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
        WREG32(mmHDP_NONSURFACE_SIZE, 0x3FFFFFFF);
        WREG32(mmMC_VM_AGP_BASE, 0);
        WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF);
        WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF);
        if (amdgpu_asic_wait_for_mc_idle(adev)) {
                dev_warn(adev->dev, "Wait for MC idle timed out!\n");
        }
        gmc_v7_0_mc_resume(adev, &save);

        WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);

        tmp = RREG32(mmHDP_MISC_CNTL);
        tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1);
        WREG32(mmHDP_MISC_CNTL, tmp);

        tmp = RREG32(mmHDP_HOST_PATH_CNTL);
        WREG32(mmHDP_HOST_PATH_CNTL, tmp);
}
/**
 * gmc_v7_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space (CIK).
 * Returns 0 for success.
 */
static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
{
        u32 tmp;
        int chansize, numchan;

        /* Get VRAM information */
        tmp = RREG32(mmMC_ARB_RAMCFG);
        if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE)) {
                chansize = 64;
        } else {
                chansize = 32;
        }
        tmp = RREG32(mmMC_SHARED_CHMAP);
        switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
        case 0:
        default:
                numchan = 1;
                break;
        case 1:
                numchan = 2;
                break;
        case 2:
                numchan = 4;
                break;
        case 3:
                numchan = 8;
                break;
        case 4:
                numchan = 3;
                break;
        case 5:
                numchan = 6;
                break;
        case 6:
                numchan = 10;
                break;
        case 7:
                numchan = 12;
                break;
        case 8:
                numchan = 16;
                break;
        }
        adev->mc.vram_width = numchan * chansize;
        /* Could aper size report 0? */
        adev->mc.aper_base = pci_resource_start(adev->pdev, 0);
        adev->mc.aper_size = pci_resource_len(adev->pdev, 0);
        /* size in MB on si */
        adev->mc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
        adev->mc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
        adev->mc.visible_vram_size = adev->mc.aper_size;

        /* unless the user had overridden it, set the gart
         * size equal to 1024 MB or the vram size, whichever is larger.
         */
        if (amdgpu_gart_size == -1)
                adev->mc.gtt_size = max((1024ULL << 20), adev->mc.mc_vram_size);
        else
                adev->mc.gtt_size = (uint64_t)amdgpu_gart_size << 20;
        gmc_v7_0_vram_gtt_location(adev, &adev->mc);

        return 0;
}
/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v7_0_gart_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 *
 * Flush the TLB for the requested page table (CIK).
 */
static void gmc_v7_0_gart_flush_gpu_tlb(struct amdgpu_device *adev,
                                        uint32_t vmid)
{
        /* flush hdp cache */
        WREG32(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0);

        /* bits 0-15 are the VM contexts0-15 */
        WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}
/**
 * gmc_v7_0_gart_set_pte_pde - update the page tables using MMIO
 *
 * @adev: amdgpu_device pointer
 * @cpu_pt_addr: cpu address of the page table
 * @gpu_page_idx: entry in the page table to update
 * @addr: dst addr to write into pte/pde
 * @flags: access flags
 *
 * Update the page tables using the CPU.
 */
static int gmc_v7_0_gart_set_pte_pde(struct amdgpu_device *adev,
                                     void *cpu_pt_addr,
                                     uint32_t gpu_page_idx,
                                     uint64_t addr,
                                     uint32_t flags)
{
        void __iomem *ptr = (void *)cpu_pt_addr;
        uint64_t value;

        value = addr & 0xFFFFFFFFFFFFF000ULL;
        value |= flags;
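        /*
         * Each GART entry is a 64-bit PTE: the 0xFFFFFFFFFFFFF000 mask keeps
         * the 4K-aligned physical address in bits 12 and up, and the access
         * flags occupy the low bits, so the single writeq() below writes the
         * whole 8-byte entry.
         */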
        writeq(value, ptr + (gpu_page_idx * 8));

        return 0;
}
/**
 * gmc_v7_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 *
 * This sets up the TLBs, programs the page tables for VMID0,
 * sets up the hw for VMIDs 1-15 which are allocated on
 * demand, and sets up the global locations for the LDS, GDS,
 * and GPUVM for FSA64 clients (CIK).
 * Returns 0 for success, errors for failure.
 */
static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
{
        int r, i;
        u32 tmp;

        if (adev->gart.robj == NULL) {
                dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
                return -EINVAL;
        }
        r = amdgpu_gart_table_vram_pin(adev);
        if (r)
                return r;

        /* Setup TLB control */
        tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
        tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
        tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1);
        tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
        tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1);
        tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
        WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
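        /*
         * On the magic value above: SYSTEM_ACCESS_MODE 3 appears to be the
         * "not in system aperture" encoding (SYSTEM_ACCESS_MODE_NOT_IN_SYS
         * in the radeon CIK code), i.e. accesses outside the system
         * aperture are translated through the VM.
         */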
        /* Setup L2 cache */
        tmp = RREG32(mmVM_L2_CNTL);
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1);
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
        WREG32(mmVM_L2_CNTL, tmp);
        tmp = REG_SET_FIELD(0, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
        WREG32(mmVM_L2_CNTL2, tmp);
        tmp = RREG32(mmVM_L2_CNTL3);
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, 4);
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, 4);
        WREG32(mmVM_L2_CNTL3, tmp);
        /* setup context0 */
        WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->mc.gtt_start >> 12);
        WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, (adev->mc.gtt_end >> 12) - 1);
        WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, adev->gart.table_addr >> 12);
        WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
               (u32)(adev->dummy_page.addr >> 12));
        WREG32(mmVM_CONTEXT0_CNTL2, 0);
        tmp = RREG32(mmVM_CONTEXT0_CNTL);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
        WREG32(mmVM_CONTEXT0_CNTL, tmp);
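        /*
         * Context 0 programmed above is the kernel's GART mapping:
         * PAGE_TABLE_DEPTH 0 selects a flat, single-level page table over
         * the GTT range, and faulting accesses are redirected to the dummy
         * page via the protection fault default address.
         */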
        /* empty context1-15 */
        /* FIXME start with 4G, once using 2 level pt switch to full
         * vm size space
         */
        /* set vm size, must be a multiple of 4 */
        WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
        WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
        for (i = 1; i < 16; i++) {
                if (i < 8)
                        WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
                               adev->gart.table_addr >> 12);
                else
                        WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
                               adev->gart.table_addr >> 12);
        }
        /* enable context1-15 */
        WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
               (u32)(adev->dummy_page.addr >> 12));
        WREG32(mmVM_CONTEXT1_CNTL2, 4);
        tmp = RREG32(mmVM_CONTEXT1_CNTL);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
                            amdgpu_vm_block_size - 9);
        WREG32(mmVM_CONTEXT1_CNTL, tmp);
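        /*
         * amdgpu_vm_block_size is the log2 of the number of pages per page
         * table block; the hardware field seems to be biased by 9 (a value
         * of 0 meaning 2^9 = 512 pages), hence the subtraction above.
         */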
        if (adev->asic_type == CHIP_KAVERI) {
                tmp = RREG32(mmCHUB_CONTROL);
                tmp &= ~BYPASS_VM;
                WREG32(mmCHUB_CONTROL, tmp);
        }
        gmc_v7_0_gart_flush_gpu_tlb(adev, 0);
        DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
                 (unsigned)(adev->mc.gtt_size >> 20),
                 (unsigned long long)adev->gart.table_addr);
        adev->gart.ready = true;
        return 0;
}
static int gmc_v7_0_gart_init(struct amdgpu_device *adev)
{
        int r;

        if (adev->gart.robj) {
                WARN(1, "R600 PCIE GART already initialized\n");
                return 0;
        }
        /* Initialize common gart structure */
        r = amdgpu_gart_init(adev);
        if (r)
                return r;
        adev->gart.table_size = adev->gart.num_gpu_pages * 8;
        return amdgpu_gart_table_vram_alloc(adev);
}
/**
 * gmc_v7_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables (CIK).
 */
static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
{
        u32 tmp;
        /* Disable all tables */
        WREG32(mmVM_CONTEXT0_CNTL, 0);
        WREG32(mmVM_CONTEXT1_CNTL, 0);
        /* Setup TLB control */
        tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
        tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
        tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0);
        tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0);
        WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
        /* Setup L2 cache */
        tmp = RREG32(mmVM_L2_CNTL);
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
        WREG32(mmVM_L2_CNTL, tmp);
        WREG32(mmVM_L2_CNTL2, 0);
        amdgpu_gart_table_vram_unpin(adev);
}
/**
 * gmc_v7_0_gart_fini - vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tears down the driver GART/VM setup (CIK).
 */
static void gmc_v7_0_gart_fini(struct amdgpu_device *adev)
{
        amdgpu_gart_table_vram_free(adev);
        amdgpu_gart_fini(adev);
}
/*
 * vm
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v7_0_vm_init - cik vm init callback
 *
 * @adev: amdgpu_device pointer
 *
 * Inits cik specific vm parameters (number of VMs, base of vram for
 * VMIDs 1-15) (CIK).
 * Returns 0 for success.
 */
static int gmc_v7_0_vm_init(struct amdgpu_device *adev)
{
        /*
         * number of VMs
         * VMID 0 is reserved for System
         * amdgpu graphics/compute will use VMIDs 1-7
         * amdkfd will use VMIDs 8-15
         */
        adev->vm_manager.nvm = AMDGPU_NUM_OF_VMIDS;

        /* base offset of vram pages */
        if (adev->flags & AMD_IS_APU) {
                u64 tmp = RREG32(mmMC_VM_FB_OFFSET);
                tmp <<= 22;
                adev->vm_manager.vram_base_offset = tmp;
        } else {
                adev->vm_manager.vram_base_offset = 0;
        }

        return 0;
}
/**
 * gmc_v7_0_vm_fini - cik vm fini callback
 *
 * @adev: amdgpu_device pointer
 *
 * Tear down any asic specific VM setup (CIK).
 */
static void gmc_v7_0_vm_fini(struct amdgpu_device *adev)
{
}
/**
 * gmc_v7_0_vm_decode_fault - print human readable fault info
 *
 * @adev: amdgpu_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
 *
 * Print human readable fault information (CIK).
 */
static void gmc_v7_0_vm_decode_fault(struct amdgpu_device *adev,
                                     u32 status, u32 addr, u32 mc_client)
{
        u32 mc_id;
        u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
        u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
                                        PROTECTIONS);
        char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
                          (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
        mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
                              MEMORY_CLIENT_ID);

        printk("VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
               protections, vmid, addr,
               REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
                             MEMORY_CLIENT_RW) ?
               "write" : "read", block, mc_client, mc_id);
}
static const u32 mc_cg_registers[] = {
        mmMC_HUB_MISC_HUB_CG,
        mmMC_HUB_MISC_SIP_CG,
        mmMC_HUB_MISC_VM_CG,
        mmMC_XPB_CLK_GAT,
        mmATC_MISC_CG,
        mmMC_CITF_MISC_WR_CG,
        mmMC_CITF_MISC_RD_CG,
        mmMC_CITF_MISC_VM_CG,
        mmVM_L2_CG,
};
static const u32 mc_cg_ls_en[] = {
        MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK,
        MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK,
        MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK,
        MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK,
        ATC_MISC_CG__MEM_LS_ENABLE_MASK,
        MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK,
        MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK,
        MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK,
        VM_L2_CG__MEM_LS_ENABLE_MASK,
};
static const u32 mc_cg_en[] = {
        MC_HUB_MISC_HUB_CG__ENABLE_MASK,
        MC_HUB_MISC_SIP_CG__ENABLE_MASK,
        MC_HUB_MISC_VM_CG__ENABLE_MASK,
        MC_XPB_CLK_GAT__ENABLE_MASK,
        ATC_MISC_CG__ENABLE_MASK,
        MC_CITF_MISC_WR_CG__ENABLE_MASK,
        MC_CITF_MISC_RD_CG__ENABLE_MASK,
        MC_CITF_MISC_VM_CG__ENABLE_MASK,
        VM_L2_CG__ENABLE_MASK,
};
static void gmc_v7_0_enable_mc_ls(struct amdgpu_device *adev,
                                  bool enable)
{
        int i;
        u32 orig, data;

        for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
                orig = data = RREG32(mc_cg_registers[i]);
                if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_LS))
                        data |= mc_cg_ls_en[i];
                else
                        data &= ~mc_cg_ls_en[i];
                if (data != orig)
                        WREG32(mc_cg_registers[i], data);
        }
}
static void gmc_v7_0_enable_mc_mgcg(struct amdgpu_device *adev,
                                    bool enable)
{
        int i;
        u32 orig, data;

        for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
                orig = data = RREG32(mc_cg_registers[i]);
                if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_MGCG))
                        data |= mc_cg_en[i];
                else
                        data &= ~mc_cg_en[i];
                if (data != orig)
                        WREG32(mc_cg_registers[i], data);
        }
}
static void gmc_v7_0_enable_bif_mgls(struct amdgpu_device *adev,
                                     bool enable)
{
        u32 orig, data;

        orig = data = RREG32_PCIE(ixPCIE_CNTL2);

        if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_BIF_LS)) {
                data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
                data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
                data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);
                data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 1);
        } else {
                data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 0);
                data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 0);
                data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 0);
                data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 0);
        }

        if (orig != data)
                WREG32_PCIE(ixPCIE_CNTL2, data);
}
static void gmc_v7_0_enable_hdp_mgcg(struct amdgpu_device *adev,
                                     bool enable)
{
        u32 orig, data;

        orig = data = RREG32(mmHDP_HOST_PATH_CNTL);

        if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_MGCG))
                data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
        else
                data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);

        if (orig != data)
                WREG32(mmHDP_HOST_PATH_CNTL, data);
}
static void gmc_v7_0_enable_hdp_ls(struct amdgpu_device *adev,
                                   bool enable)
{
        u32 orig, data;

        orig = data = RREG32(mmHDP_MEM_POWER_LS);

        if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_LS))
                data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
        else
                data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);

        if (orig != data)
                WREG32(mmHDP_MEM_POWER_LS, data);
}
static int gmc_v7_0_convert_vram_type(int mc_seq_vram_type)
{
        switch (mc_seq_vram_type) {
        case MC_SEQ_MISC0__MT__GDDR1:
                return AMDGPU_VRAM_TYPE_GDDR1;
        case MC_SEQ_MISC0__MT__DDR2:
                return AMDGPU_VRAM_TYPE_DDR2;
        case MC_SEQ_MISC0__MT__GDDR3:
                return AMDGPU_VRAM_TYPE_GDDR3;
        case MC_SEQ_MISC0__MT__GDDR4:
                return AMDGPU_VRAM_TYPE_GDDR4;
        case MC_SEQ_MISC0__MT__GDDR5:
                return AMDGPU_VRAM_TYPE_GDDR5;
        case MC_SEQ_MISC0__MT__HBM:
                return AMDGPU_VRAM_TYPE_HBM;
        case MC_SEQ_MISC0__MT__DDR3:
                return AMDGPU_VRAM_TYPE_DDR3;
        default:
                return AMDGPU_VRAM_TYPE_UNKNOWN;
        }
}
static int gmc_v7_0_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        gmc_v7_0_set_gart_funcs(adev);
        gmc_v7_0_set_irq_funcs(adev);

        if (adev->flags & AMD_IS_APU) {
                adev->mc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
        } else {
                u32 tmp = RREG32(mmMC_SEQ_MISC0);
                tmp &= MC_SEQ_MISC0__MT__MASK;
                adev->mc.vram_type = gmc_v7_0_convert_vram_type(tmp);
        }

        return 0;
}
static int gmc_v7_0_late_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
}
static int gmc_v7_0_sw_init(void *handle)
{
        int r;
        int dma_bits;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = amdgpu_gem_init(adev);
        if (r)
                return r;

        r = amdgpu_irq_add_id(adev, 146, &adev->mc.vm_fault);
        if (r)
                return r;

        r = amdgpu_irq_add_id(adev, 147, &adev->mc.vm_fault);
        if (r)
                return r;

        /* Adjust VM size here.
         * Currently set to 4GB ((1 << 20) 4k pages).
         * Max GPUVM size for cayman and SI is 40 bits.
         */
        adev->vm_manager.max_pfn = amdgpu_vm_size << 18;
        /* Set the internal MC address mask
         * This is the max address of the GPU's
         * internal address space.
         */
        adev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */

        /* set DMA mask + need_dma32 flags.
         * PCIE - can handle 40-bits.
         * IGP - can handle 40-bits
         * PCI - dma32 for legacy pci gart, 40 bits on newer asics
         */
        adev->need_dma32 = false;
        dma_bits = adev->need_dma32 ? 32 : 40;
        r = pci_set_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
        if (r) {
                adev->need_dma32 = true;
                dma_bits = 32;
                printk(KERN_WARNING "amdgpu: No suitable DMA available.\n");
        }
        r = pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(dma_bits));
        if (r) {
                pci_set_consistent_dma_mask(adev->pdev, DMA_BIT_MASK(32));
                printk(KERN_WARNING "amdgpu: No coherent DMA available.\n");
        }
= gmc_v7_0_init_microcode(adev
);
907 DRM_ERROR("Failed to load mc firmware!\n");
911 r
= gmc_v7_0_mc_init(adev
);
916 r
= amdgpu_bo_init(adev
);
920 r
= gmc_v7_0_gart_init(adev
);
924 if (!adev
->vm_manager
.enabled
) {
925 r
= gmc_v7_0_vm_init(adev
);
927 dev_err(adev
->dev
, "vm manager initialization failed (%d).\n", r
);
930 adev
->vm_manager
.enabled
= true;
static int gmc_v7_0_sw_fini(void *handle)
{
        int i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (adev->vm_manager.enabled) {
                for (i = 0; i < AMDGPU_NUM_VM; ++i)
                        amdgpu_fence_unref(&adev->vm_manager.active[i]);
                gmc_v7_0_vm_fini(adev);
                adev->vm_manager.enabled = false;
        }
        gmc_v7_0_gart_fini(adev);
        amdgpu_gem_fini(adev);
        amdgpu_bo_fini(adev);

        return 0;
}
static int gmc_v7_0_hw_init(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        gmc_v7_0_mc_program(adev);

        if (!(adev->flags & AMD_IS_APU)) {
                r = gmc_v7_0_mc_load_microcode(adev);
                if (r) {
                        DRM_ERROR("Failed to load MC firmware!\n");
                        return r;
                }
        }

        r = gmc_v7_0_gart_enable(adev);
        if (r)
                return r;

        return r;
}
static int gmc_v7_0_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        amdgpu_irq_put(adev, &adev->mc.vm_fault, 0);
        gmc_v7_0_gart_disable(adev);

        return 0;
}
)
989 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
991 if (adev
->vm_manager
.enabled
) {
992 for (i
= 0; i
< AMDGPU_NUM_VM
; ++i
)
993 amdgpu_fence_unref(&adev
->vm_manager
.active
[i
]);
994 gmc_v7_0_vm_fini(adev
);
995 adev
->vm_manager
.enabled
= false;
997 gmc_v7_0_hw_fini(adev
);
static int gmc_v7_0_resume(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = gmc_v7_0_hw_init(adev);
        if (r)
                return r;

        if (!adev->vm_manager.enabled) {
                r = gmc_v7_0_vm_init(adev);
                if (r) {
                        dev_err(adev->dev, "vm manager initialization failed (%d).\n", r);
                        return r;
                }
                adev->vm_manager.enabled = true;
        }

        return r;
}
static bool gmc_v7_0_is_idle(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 tmp = RREG32(mmSRBM_STATUS);

        if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
                   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
                return false;

        return true;
}
static int gmc_v7_0_wait_for_idle(void *handle)
{
        unsigned i;
        u32 tmp;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        for (i = 0; i < adev->usec_timeout; i++) {
                /* read MC_STATUS */
                tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
                                               SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
                                               SRBM_STATUS__MCC_BUSY_MASK |
                                               SRBM_STATUS__MCD_BUSY_MASK |
                                               SRBM_STATUS__VMC_BUSY_MASK);
                if (!tmp)
                        return 0;
                udelay(1);
        }
        return -ETIMEDOUT;
}
static void gmc_v7_0_print_status(void *handle)
{
        int i, j;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        dev_info(adev->dev, "GMC 7.x registers\n");
        dev_info(adev->dev, " SRBM_STATUS=0x%08X\n",
                 RREG32(mmSRBM_STATUS));
        dev_info(adev->dev, " SRBM_STATUS2=0x%08X\n",
                 RREG32(mmSRBM_STATUS2));

        dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
                 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR));
        dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
                 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS));
        dev_info(adev->dev, " MC_VM_MX_L1_TLB_CNTL=0x%08X\n",
                 RREG32(mmMC_VM_MX_L1_TLB_CNTL));
        dev_info(adev->dev, " VM_L2_CNTL=0x%08X\n",
                 RREG32(mmVM_L2_CNTL));
        dev_info(adev->dev, " VM_L2_CNTL2=0x%08X\n",
                 RREG32(mmVM_L2_CNTL2));
        dev_info(adev->dev, " VM_L2_CNTL3=0x%08X\n",
                 RREG32(mmVM_L2_CNTL3));
        dev_info(adev->dev, " VM_CONTEXT0_PAGE_TABLE_START_ADDR=0x%08X\n",
                 RREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR));
        dev_info(adev->dev, " VM_CONTEXT0_PAGE_TABLE_END_ADDR=0x%08X\n",
                 RREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR));
        dev_info(adev->dev, " VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n",
                 RREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR));
        dev_info(adev->dev, " VM_CONTEXT0_CNTL2=0x%08X\n",
                 RREG32(mmVM_CONTEXT0_CNTL2));
        dev_info(adev->dev, " VM_CONTEXT0_CNTL=0x%08X\n",
                 RREG32(mmVM_CONTEXT0_CNTL));
        dev_info(adev->dev, " 0x15D4=0x%08X\n",
                 RREG32(0x15D4));
        dev_info(adev->dev, " 0x15D8=0x%08X\n",
                 RREG32(0x15D8));
        dev_info(adev->dev, " 0x15DC=0x%08X\n",
                 RREG32(0x15DC));
        dev_info(adev->dev, " VM_CONTEXT1_PAGE_TABLE_START_ADDR=0x%08X\n",
                 RREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR));
        dev_info(adev->dev, " VM_CONTEXT1_PAGE_TABLE_END_ADDR=0x%08X\n",
                 RREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR));
        dev_info(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR=0x%08X\n",
                 RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR));
        dev_info(adev->dev, " VM_CONTEXT1_CNTL2=0x%08X\n",
                 RREG32(mmVM_CONTEXT1_CNTL2));
        dev_info(adev->dev, " VM_CONTEXT1_CNTL=0x%08X\n",
                 RREG32(mmVM_CONTEXT1_CNTL));
        for (i = 0; i < 16; i++) {
                if (i < 8)
                        dev_info(adev->dev, " VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n",
                                 i, RREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i));
                else
                        dev_info(adev->dev, " VM_CONTEXT%d_PAGE_TABLE_BASE_ADDR=0x%08X\n",
                                 i, RREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8));
        }
        dev_info(adev->dev, " MC_VM_SYSTEM_APERTURE_LOW_ADDR=0x%08X\n",
                 RREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR));
        dev_info(adev->dev, " MC_VM_SYSTEM_APERTURE_HIGH_ADDR=0x%08X\n",
                 RREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR));
        dev_info(adev->dev, " MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR=0x%08X\n",
                 RREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR));
        dev_info(adev->dev, " MC_VM_FB_LOCATION=0x%08X\n",
                 RREG32(mmMC_VM_FB_LOCATION));
        dev_info(adev->dev, " MC_VM_AGP_BASE=0x%08X\n",
                 RREG32(mmMC_VM_AGP_BASE));
        dev_info(adev->dev, " MC_VM_AGP_TOP=0x%08X\n",
                 RREG32(mmMC_VM_AGP_TOP));
        dev_info(adev->dev, " MC_VM_AGP_BOT=0x%08X\n",
                 RREG32(mmMC_VM_AGP_BOT));

        if (adev->asic_type == CHIP_KAVERI) {
                dev_info(adev->dev, " CHUB_CONTROL=0x%08X\n",
                         RREG32(mmCHUB_CONTROL));
        }

        dev_info(adev->dev, " HDP_REG_COHERENCY_FLUSH_CNTL=0x%08X\n",
                 RREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL));
        dev_info(adev->dev, " HDP_NONSURFACE_BASE=0x%08X\n",
                 RREG32(mmHDP_NONSURFACE_BASE));
        dev_info(adev->dev, " HDP_NONSURFACE_INFO=0x%08X\n",
                 RREG32(mmHDP_NONSURFACE_INFO));
        dev_info(adev->dev, " HDP_NONSURFACE_SIZE=0x%08X\n",
                 RREG32(mmHDP_NONSURFACE_SIZE));
        dev_info(adev->dev, " HDP_MISC_CNTL=0x%08X\n",
                 RREG32(mmHDP_MISC_CNTL));
        dev_info(adev->dev, " HDP_HOST_PATH_CNTL=0x%08X\n",
                 RREG32(mmHDP_HOST_PATH_CNTL));

        for (i = 0, j = 0; i < 32; i++, j += 0x6) {
                dev_info(adev->dev, " %d:\n", i);
                dev_info(adev->dev, " 0x%04X=0x%08X\n",
                         0xb05 + j, RREG32(0xb05 + j));
                dev_info(adev->dev, " 0x%04X=0x%08X\n",
                         0xb06 + j, RREG32(0xb06 + j));
                dev_info(adev->dev, " 0x%04X=0x%08X\n",
                         0xb07 + j, RREG32(0xb07 + j));
                dev_info(adev->dev, " 0x%04X=0x%08X\n",
                         0xb08 + j, RREG32(0xb08 + j));
                dev_info(adev->dev, " 0x%04X=0x%08X\n",
                         0xb09 + j, RREG32(0xb09 + j));
        }

        dev_info(adev->dev, " BIF_FB_EN=0x%08X\n",
                 RREG32(mmBIF_FB_EN));
}
static int gmc_v7_0_soft_reset(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct amdgpu_mode_mc_save save;
        u32 srbm_soft_reset = 0;
        u32 tmp = RREG32(mmSRBM_STATUS);

        if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
                srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
                                                SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

        if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
                   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
                if (!(adev->flags & AMD_IS_APU))
                        srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
                                                        SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
        }

        if (srbm_soft_reset) {
                gmc_v7_0_print_status((void *)adev);

                gmc_v7_0_mc_stop(adev, &save);
                if (gmc_v7_0_wait_for_idle(adev)) {
                        dev_warn(adev->dev, "Wait for GMC idle timed out!\n");
                }

                tmp = RREG32(mmSRBM_SOFT_RESET);
                tmp |= srbm_soft_reset;
                dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
                WREG32(mmSRBM_SOFT_RESET, tmp);
                tmp = RREG32(mmSRBM_SOFT_RESET);

                udelay(50);

                tmp &= ~srbm_soft_reset;
                WREG32(mmSRBM_SOFT_RESET, tmp);
                tmp = RREG32(mmSRBM_SOFT_RESET);

                /* Wait a little for things to settle down */
                udelay(50);

                gmc_v7_0_mc_resume(adev, &save);
                udelay(50);

                gmc_v7_0_print_status((void *)adev);
        }

        return 0;
}
static int gmc_v7_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
                                             struct amdgpu_irq_src *src,
                                             unsigned type,
                                             enum amdgpu_interrupt_state state)
{
        u32 tmp;
        u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);

        switch (state) {
        case AMDGPU_IRQ_STATE_DISABLE:
                /* system context */
                tmp = RREG32(mmVM_CONTEXT0_CNTL);
                tmp &= ~bits;
                WREG32(mmVM_CONTEXT0_CNTL, tmp);
                tmp = RREG32(mmVM_CONTEXT1_CNTL);
                tmp &= ~bits;
                WREG32(mmVM_CONTEXT1_CNTL, tmp);
                break;
        case AMDGPU_IRQ_STATE_ENABLE:
                /* system context */
                tmp = RREG32(mmVM_CONTEXT0_CNTL);
                tmp |= bits;
                WREG32(mmVM_CONTEXT0_CNTL, tmp);
                tmp = RREG32(mmVM_CONTEXT1_CNTL);
                tmp |= bits;
                WREG32(mmVM_CONTEXT1_CNTL, tmp);
                break;
        default:
                break;
        }

        return 0;
}
static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *source,
                                      struct amdgpu_iv_entry *entry)
{
        u32 addr, status, mc_client;

        addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
        status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
        mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
        /* reset addr and status */
        WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);

        if (!addr && !status)
                return 0;

        dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
                entry->src_id, entry->src_data);
        dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
                addr);
        dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
                status);
        gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client);

        return 0;
}
static int gmc_v7_0_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state)
{
        bool gate = false;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (state == AMD_CG_STATE_GATE)
                gate = true;

        if (!(adev->flags & AMD_IS_APU)) {
                gmc_v7_0_enable_mc_mgcg(adev, gate);
                gmc_v7_0_enable_mc_ls(adev, gate);
        }
        gmc_v7_0_enable_bif_mgls(adev, gate);
        gmc_v7_0_enable_hdp_mgcg(adev, gate);
        gmc_v7_0_enable_hdp_ls(adev, gate);

        return 0;
}
static int gmc_v7_0_set_powergating_state(void *handle,
                                          enum amd_powergating_state state)
{
        return 0;
}
const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
        .early_init = gmc_v7_0_early_init,
        .late_init = gmc_v7_0_late_init,
        .sw_init = gmc_v7_0_sw_init,
        .sw_fini = gmc_v7_0_sw_fini,
        .hw_init = gmc_v7_0_hw_init,
        .hw_fini = gmc_v7_0_hw_fini,
        .suspend = gmc_v7_0_suspend,
        .resume = gmc_v7_0_resume,
        .is_idle = gmc_v7_0_is_idle,
        .wait_for_idle = gmc_v7_0_wait_for_idle,
        .soft_reset = gmc_v7_0_soft_reset,
        .print_status = gmc_v7_0_print_status,
        .set_clockgating_state = gmc_v7_0_set_clockgating_state,
        .set_powergating_state = gmc_v7_0_set_powergating_state,
};
static const struct amdgpu_gart_funcs gmc_v7_0_gart_funcs = {
        .flush_gpu_tlb = gmc_v7_0_gart_flush_gpu_tlb,
        .set_pte_pde = gmc_v7_0_gart_set_pte_pde,
};
static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {
        .set = gmc_v7_0_vm_fault_interrupt_state,
        .process = gmc_v7_0_process_interrupt,
};
static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev)
{
        if (adev->gart.gart_funcs == NULL)
                adev->gart.gart_funcs = &gmc_v7_0_gart_funcs;
}
static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->mc.vm_fault.num_types = 1;
        adev->mc.vm_fault.funcs = &gmc_v7_0_irq_funcs;
}