/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include "radeon_drm.h"
#include "radeon_asic.h"
#include "radeon_mode.h"

#define PFP_UCODE_SIZE 576
#define PM4_UCODE_SIZE 1792
#define RLC_UCODE_SIZE 768
#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360
#define R700_RLC_UCODE_SIZE 1024
#define EVERGREEN_PFP_UCODE_SIZE 1120
#define EVERGREEN_PM4_UCODE_SIZE 1376
#define EVERGREEN_RLC_UCODE_SIZE 768
52 MODULE_FIRMWARE("radeon/R600_pfp.bin");
53 MODULE_FIRMWARE("radeon/R600_me.bin");
54 MODULE_FIRMWARE("radeon/RV610_pfp.bin");
55 MODULE_FIRMWARE("radeon/RV610_me.bin");
56 MODULE_FIRMWARE("radeon/RV630_pfp.bin");
57 MODULE_FIRMWARE("radeon/RV630_me.bin");
58 MODULE_FIRMWARE("radeon/RV620_pfp.bin");
59 MODULE_FIRMWARE("radeon/RV620_me.bin");
60 MODULE_FIRMWARE("radeon/RV635_pfp.bin");
61 MODULE_FIRMWARE("radeon/RV635_me.bin");
62 MODULE_FIRMWARE("radeon/RV670_pfp.bin");
63 MODULE_FIRMWARE("radeon/RV670_me.bin");
64 MODULE_FIRMWARE("radeon/RS780_pfp.bin");
65 MODULE_FIRMWARE("radeon/RS780_me.bin");
66 MODULE_FIRMWARE("radeon/RV770_pfp.bin");
67 MODULE_FIRMWARE("radeon/RV770_me.bin");
68 MODULE_FIRMWARE("radeon/RV730_pfp.bin");
69 MODULE_FIRMWARE("radeon/RV730_me.bin");
70 MODULE_FIRMWARE("radeon/RV710_pfp.bin");
71 MODULE_FIRMWARE("radeon/RV710_me.bin");
72 MODULE_FIRMWARE("radeon/R600_rlc.bin");
73 MODULE_FIRMWARE("radeon/R700_rlc.bin");
74 MODULE_FIRMWARE("radeon/CEDAR_pfp.bin");
75 MODULE_FIRMWARE("radeon/CEDAR_me.bin");
76 MODULE_FIRMWARE("radeon/CEDAR_rlc.bin");
77 MODULE_FIRMWARE("radeon/REDWOOD_pfp.bin");
78 MODULE_FIRMWARE("radeon/REDWOOD_me.bin");
79 MODULE_FIRMWARE("radeon/REDWOOD_rlc.bin");
80 MODULE_FIRMWARE("radeon/JUNIPER_pfp.bin");
81 MODULE_FIRMWARE("radeon/JUNIPER_me.bin");
82 MODULE_FIRMWARE("radeon/JUNIPER_rlc.bin");
83 MODULE_FIRMWARE("radeon/CYPRESS_pfp.bin");
84 MODULE_FIRMWARE("radeon/CYPRESS_me.bin");
85 MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
int r600_debugfs_mc_info_init(struct radeon_device *rdev);

/* r600,rv610,rv630,rv620,rv635,rv670 */
int r600_mc_wait_for_idle(struct radeon_device *rdev);
void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
void r600_irq_disable(struct radeon_device *rdev);

bool r600_gui_idle(struct radeon_device *rdev)
{
	if (RREG32(GRBM_STATUS) & GUI_ACTIVE)
		return false;
	else
		return true;
}
/* hpd for digital panel detect/disconnect */
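/*
 * Summary of the two helpers that follow: r600_hpd_sense() reads the
 * per-pin SENSE bit to report whether a display is currently attached,
 * and r600_hpd_set_polarity() programs the HPD interrupt polarity from
 * that state so the next connect/disconnect transition raises an
 * interrupt.
 */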
bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
	bool connected = false;

	if (ASIC_IS_DCE3(rdev)) {
		if (RREG32(DC_HPD1_INT_STATUS) & DC_HPDx_SENSE)
		if (RREG32(DC_HPD2_INT_STATUS) & DC_HPDx_SENSE)
		if (RREG32(DC_HPD3_INT_STATUS) & DC_HPDx_SENSE)
		if (RREG32(DC_HPD4_INT_STATUS) & DC_HPDx_SENSE)
		if (RREG32(DC_HPD5_INT_STATUS) & DC_HPDx_SENSE)
		if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
		if (RREG32(DC_HOT_PLUG_DETECT1_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
		if (RREG32(DC_HOT_PLUG_DETECT2_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
		if (RREG32(DC_HOT_PLUG_DETECT3_INT_STATUS) & DC_HOT_PLUG_DETECTx_SENSE)
void r600_hpd_set_polarity(struct radeon_device *rdev,
			   enum radeon_hpd_id hpd)
	bool connected = r600_hpd_sense(rdev, hpd);

	if (ASIC_IS_DCE3(rdev)) {
		tmp = RREG32(DC_HPD1_INT_CONTROL);
		tmp &= ~DC_HPDx_INT_POLARITY;
		tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD2_INT_CONTROL);
		tmp &= ~DC_HPDx_INT_POLARITY;
		tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD3_INT_CONTROL);
		tmp &= ~DC_HPDx_INT_POLARITY;
		tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		tmp &= ~DC_HPDx_INT_POLARITY;
		tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD5_INT_CONTROL);
		tmp &= ~DC_HPDx_INT_POLARITY;
		tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD5_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD6_INT_CONTROL);
		tmp &= ~DC_HPDx_INT_POLARITY;
		tmp |= DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD6_INT_CONTROL, tmp);
		tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
		tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
		tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
		tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
		tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
		tmp &= ~DC_HOT_PLUG_DETECTx_INT_POLARITY;
		tmp |= DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
void r600_hpd_init(struct radeon_device *rdev)
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	if (ASIC_IS_DCE3(rdev)) {
		u32 tmp = DC_HPDx_CONNECTION_TIMER(0x9c4) | DC_HPDx_RX_INT_TIMER(0xfa);
		if (ASIC_IS_DCE32(rdev))
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
				WREG32(DC_HPD1_CONTROL, tmp);
				rdev->irq.hpd[0] = true;
				WREG32(DC_HPD2_CONTROL, tmp);
				rdev->irq.hpd[1] = true;
				WREG32(DC_HPD3_CONTROL, tmp);
				rdev->irq.hpd[2] = true;
				WREG32(DC_HPD4_CONTROL, tmp);
				rdev->irq.hpd[3] = true;
				WREG32(DC_HPD5_CONTROL, tmp);
				rdev->irq.hpd[4] = true;
				WREG32(DC_HPD6_CONTROL, tmp);
				rdev->irq.hpd[5] = true;
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[0] = true;
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[1] = true;
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, DC_HOT_PLUG_DETECTx_EN);
				rdev->irq.hpd[2] = true;
	if (rdev->irq.installed)
void r600_hpd_fini(struct radeon_device *rdev)
	struct drm_device *dev = rdev->ddev;
	struct drm_connector *connector;

	if (ASIC_IS_DCE3(rdev)) {
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
				WREG32(DC_HPD1_CONTROL, 0);
				rdev->irq.hpd[0] = false;
				WREG32(DC_HPD2_CONTROL, 0);
				rdev->irq.hpd[1] = false;
				WREG32(DC_HPD3_CONTROL, 0);
				rdev->irq.hpd[2] = false;
				WREG32(DC_HPD4_CONTROL, 0);
				rdev->irq.hpd[3] = false;
				WREG32(DC_HPD5_CONTROL, 0);
				rdev->irq.hpd[4] = false;
				WREG32(DC_HPD6_CONTROL, 0);
				rdev->irq.hpd[5] = false;
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			struct radeon_connector *radeon_connector = to_radeon_connector(connector);
			switch (radeon_connector->hpd.hpd) {
				WREG32(DC_HOT_PLUG_DETECT1_CONTROL, 0);
				rdev->irq.hpd[0] = false;
				WREG32(DC_HOT_PLUG_DETECT2_CONTROL, 0);
				rdev->irq.hpd[1] = false;
				WREG32(DC_HOT_PLUG_DETECT3_CONTROL, 0);
				rdev->irq.hpd[2] = false;
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
	/* flush hdp cache so updates hit vram */
	WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
	printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
int r600_pcie_gart_init(struct radeon_device *rdev)
	if (rdev->gart.table.vram.robj) {
		WARN(1, "R600 PCIE GART already initialized.\n");
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	return radeon_gart_table_vram_alloc(rdev);
int r600_pcie_gart_enable(struct radeon_device *rdev)
	if (rdev->gart.table.vram.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
	r = radeon_gart_table_vram_pin(rdev);
	radeon_gart_restore(rdev);

	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	for (i = 1; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	r600_pcie_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
void r600_pcie_gart_disable(struct radeon_device *rdev)
	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Disable L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup L1 TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	if (rdev->gart.table.vram.robj) {
		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->gart.table.vram.robj);
			radeon_bo_unpin(rdev->gart.table.vram.robj);
			radeon_bo_unreserve(rdev->gart.table.vram.robj);

void r600_pcie_gart_fini(struct radeon_device *rdev)
	radeon_gart_fini(rdev);
	r600_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
void r600_agp_enable(struct radeon_device *rdev)
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
int r600_mc_wait_for_idle(struct radeon_device *rdev)
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
static void r600_mc_program(struct radeon_device *rdev)
	struct rv515_mc_save save;

	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	/* Lockout access through VGA aperture (doesn't exist before R600) */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, rdev->mc.mc_vram_size | 0x3FF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	rv515_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it from overwriting our objects */
	rv515_vga_render_disable(rdev);
/**
 * r600_vram_gtt_location - try to find VRAM & GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * This function tries to place VRAM at the same offset as in the CPU (PCI)
 * address space, as some GPUs have issues when VRAM is reprogrammed to a
 * different address space.
 *
 * If there is not enough space to fit the invisible VRAM after the
 * aperture, the VRAM size is limited to the aperture.
 *
 * If AGP is used, VRAM is placed adjacent to the AGP aperture; the two must
 * be contiguous from the GPU point of view so that the GPU can be programmed
 * to catch accesses outside of them.
 *
 * This function never fails; in the worst case VRAM or GTT is limited.
 *
 * Note: GTT start, end and size should be initialized before calling this
 * function on AGP platforms.
 */
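/*
 * Worked example with hypothetical numbers: for a 512MB card whose
 * AGP/GTT aperture spans 0xD0000000 - 0xDFFFFFFF, size_bf = 0xD0000000
 * and size_af = 0x20000000, so VRAM fits below the aperture and is
 * placed at vram_start = 0xD0000000 - 0x20000000 = 0xB0000000 with
 * vram_end = 0xCFFFFFFF.
 */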
void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
	u64 size_bf, size_af;

	if (mc->mc_vram_size > 0xE0000000) {
		/* leave room for at least 512M GTT */
		dev_warn(rdev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xE0000000;
		mc->mc_vram_size = 0xE0000000;
	if (rdev->flags & RADEON_IS_AGP) {
		size_bf = mc->gtt_start;
		size_af = 0xFFFFFFFF - mc->gtt_end + 1;
		if (size_bf > size_af) {
			if (mc->mc_vram_size > size_bf) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_bf;
				mc->mc_vram_size = size_bf;
			mc->vram_start = mc->gtt_start - mc->mc_vram_size;
			if (mc->mc_vram_size > size_af) {
				dev_warn(rdev->dev, "limiting VRAM\n");
				mc->real_vram_size = size_af;
				mc->mc_vram_size = size_af;
			mc->vram_start = mc->gtt_end;
		mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
		dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
				mc->mc_vram_size >> 20, mc->vram_start,
				mc->vram_end, mc->real_vram_size >> 20);
		if (rdev->flags & RADEON_IS_IGP)
			base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24;
		radeon_vram_location(rdev, &rdev->mc, base);
		radeon_gtt_location(rdev, mc);
int r600_mc_init(struct radeon_device *rdev)
	int chansize, numchan;

	/* Get VRAM information */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
	} else if (tmp & CHANSIZE_MASK) {
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	rdev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0 ? */
	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
	/* Setup GPU memory space */
	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	r600_vram_gtt_location(rdev, &rdev->mc);

	if (rdev->flags & RADEON_IS_IGP)
		rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
	radeon_update_bandwidth_info(rdev);
/* We don't check whether the GPU really needs a reset; we simply do the
 * reset. It is up to the caller to determine if the GPU needs one. We
 * might add a helper function to check that.
 */
int r600_gpu_soft_reset(struct radeon_device *rdev)
	struct rv515_mc_save save;
	u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
				S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
				S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
				S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) |
				S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) |
				S_008010_PA_BUSY(1) | S_008010_DB03_BUSY(1) |
				S_008010_CR_BUSY(1) | S_008010_CB03_BUSY(1) |
				S_008010_GUI_ACTIVE(1);
	u32 grbm2_busy_mask = S_008014_SPI0_BUSY(1) | S_008014_SPI1_BUSY(1) |
				S_008014_SPI2_BUSY(1) | S_008014_SPI3_BUSY(1) |
				S_008014_TA0_BUSY(1) | S_008014_TA1_BUSY(1) |
				S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) |
				S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) |
				S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
				S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
				S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);

	dev_info(rdev->dev, "GPU softreset\n");
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	/* Disable CP parsing/prefetching */
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
	/* Check if any of the rendering block is busy and reset it */
	if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
	    (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
		tmp = S_008020_SOFT_RESET_CR(1) |
			S_008020_SOFT_RESET_DB(1) |
			S_008020_SOFT_RESET_CB(1) |
			S_008020_SOFT_RESET_PA(1) |
			S_008020_SOFT_RESET_SC(1) |
			S_008020_SOFT_RESET_SMX(1) |
			S_008020_SOFT_RESET_SPI(1) |
			S_008020_SOFT_RESET_SX(1) |
			S_008020_SOFT_RESET_SH(1) |
			S_008020_SOFT_RESET_TC(1) |
			S_008020_SOFT_RESET_TA(1) |
			S_008020_SOFT_RESET_VC(1) |
			S_008020_SOFT_RESET_VGT(1);
		dev_info(rdev->dev, "  R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
		RREG32(R_008020_GRBM_SOFT_RESET);
		WREG32(R_008020_GRBM_SOFT_RESET, 0);
	/* Reset CP (we always reset CP) */
	tmp = S_008020_SOFT_RESET_CP(1);
	dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
	WREG32(R_008020_GRBM_SOFT_RESET, tmp);
	RREG32(R_008020_GRBM_SOFT_RESET);
	WREG32(R_008020_GRBM_SOFT_RESET, 0);
	/* Wait a little for things to settle down */
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	rv515_mc_resume(rdev, &save);
bool r600_gpu_is_lockup(struct radeon_device *rdev)
	srbm_status = RREG32(R_000E50_SRBM_STATUS);
	grbm_status = RREG32(R_008010_GRBM_STATUS);
	grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
	if (!G_008010_GUI_ACTIVE(grbm_status)) {
		r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
	/* force CP activities */
	r = radeon_ring_lock(rdev, 2);
	radeon_ring_write(rdev, 0x80000000);
	radeon_ring_write(rdev, 0x80000000);
	radeon_ring_unlock_commit(rdev);
	rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
	return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);

int r600_asic_reset(struct radeon_device *rdev)
	return r600_gpu_soft_reset(rdev);
static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
					     u32 backend_disable_mask)
	u32 enabled_backends_mask;
	u32 enabled_backends_count;
	u32 swizzle_pipe[R6XX_MAX_PIPES];

	if (num_tile_pipes > R6XX_MAX_PIPES)
		num_tile_pipes = R6XX_MAX_PIPES;
	if (num_tile_pipes < 1)
	if (num_backends > R6XX_MAX_BACKENDS)
		num_backends = R6XX_MAX_BACKENDS;
	if (num_backends < 1)
	enabled_backends_mask = 0;
	enabled_backends_count = 0;
	for (i = 0; i < R6XX_MAX_BACKENDS; ++i) {
		if (((backend_disable_mask >> i) & 1) == 0) {
			enabled_backends_mask |= (1 << i);
			++enabled_backends_count;
		if (enabled_backends_count == num_backends)
	if (enabled_backends_count == 0) {
		enabled_backends_mask = 1;
		enabled_backends_count = 1;
	if (enabled_backends_count != num_backends)
		num_backends = enabled_backends_count;

	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES);
	switch (num_tile_pipes) {
	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
		while (((1 << cur_backend) & enabled_backends_mask) == 0)
			cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
		backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));
		cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;

int r600_count_pipe_bits(uint32_t val)
	for (i = 0; i < 32; i++) {
void r600_gpu_init(struct radeon_device *rdev)
	u32 cc_rb_backend_disable;
	u32 cc_gc_shader_pipe_config;
	u32 sq_gpr_resource_mgmt_1 = 0;
	u32 sq_gpr_resource_mgmt_2 = 0;
	u32 sq_thread_resource_mgmt = 0;
	u32 sq_stack_resource_mgmt_1 = 0;
	u32 sq_stack_resource_mgmt_2 = 0;

	/* FIXME: implement */
	switch (rdev->family) {
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 8;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 256;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;

		rdev->config.r600.max_pipes = 2;
		rdev->config.r600.max_tile_pipes = 2;
		rdev->config.r600.max_simds = 3;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;

		rdev->config.r600.max_pipes = 1;
		rdev->config.r600.max_tile_pipes = 1;
		rdev->config.r600.max_simds = 2;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 4;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 1;

		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 4;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 192;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	ramcfg = RREG32(RAMCFG);
	switch (rdev->config.r600.max_tile_pipes) {
		tiling_config |= PIPE_TILING(0);
		tiling_config |= PIPE_TILING(1);
		tiling_config |= PIPE_TILING(2);
		tiling_config |= PIPE_TILING(3);
	rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
	rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
	tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
	tiling_config |= GROUP_SIZE(0);
	rdev->config.r600.tiling_group_size = 256;
	tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
		tiling_config |= ROW_TILING(3);
		tiling_config |= SAMPLE_SPLIT(3);
		tiling_config |= ROW_TILING(tmp);
		tiling_config |= SAMPLE_SPLIT(tmp);
	tiling_config |= BANK_SWAPS(1);

	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
	cc_rb_backend_disable |=
		BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);

	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
	cc_gc_shader_pipe_config |=
		INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
	cc_gc_shader_pipe_config |=
		INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);

	backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
							(R6XX_MAX_BACKENDS -
							 r600_count_pipe_bits((cc_rb_backend_disable &
									       R6XX_MAX_BACKENDS_MASK) >> 16)),
							(cc_rb_backend_disable >> 16));

	tiling_config |= BACKEND_MAP(backend_map);
	WREG32(GB_TILING_CONFIG, tiling_config);
	WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
	WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);

	WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
	WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
	WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);

	tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
	WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);

	/* Setup some CP states */
	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
	WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));

	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
			     SYNC_WALKER | SYNC_ALIGNER));
	/* Setup various GPU states */
	if (rdev->family == CHIP_RV670)
		WREG32(ARB_GDEC_RD_CNTL, 0x00000021);

	tmp = RREG32(SX_DEBUG_1);
	tmp |= SMX_EVENT_RELEASE;
	if ((rdev->family > CHIP_R600))
		tmp |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, tmp);

	if (((rdev->family) == CHIP_R600) ||
	    ((rdev->family) == CHIP_RV630) ||
	    ((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
		WREG32(DB_DEBUG, 0);

	WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
			       DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(VGT_NUM_INSTANCES, 0);

	WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));

	tmp = RREG32(SQ_MS_FIFO_SIZES);
	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		tmp = (CACHE_FIFO_SIZE(0xa) |
		       FETCH_FIFO_HIWATER(0xa) |
		       DONE_FIFO_HIWATER(0xe0) |
		       ALU_UPDATE_FIFO_HIWATER(0x8));
	} else if (((rdev->family) == CHIP_R600) ||
		   ((rdev->family) == CHIP_RV630)) {
		tmp &= ~DONE_FIFO_HIWATER(0xff);
		tmp |= DONE_FIFO_HIWATER(0x4);
	WREG32(SQ_MS_FIFO_SIZES, tmp);

	/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
	 * should be adjusted as needed by the 2D/3D drivers.  This just sets default values
	 */
	sq_config = RREG32(SQ_CONFIG);
	sq_config &= ~(PS_PRIO(3) |
	sq_config |= (DX9_CONSTS |
	if ((rdev->family) == CHIP_R600) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
					  NUM_CLAUSE_TEMP_GPRS(4));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
		sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
					   NUM_VS_THREADS(48) |
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
					    NUM_VS_STACK_ENTRIES(128));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
					    NUM_ES_STACK_ENTRIES(0));
	} else if (((rdev->family) == CHIP_RV610) ||
		   ((rdev->family) == CHIP_RV620) ||
		   ((rdev->family) == CHIP_RS780) ||
		   ((rdev->family) == CHIP_RS880)) {
		/* no vertex cache */
		sq_config &= ~VC_ENABLE;
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if (((rdev->family) == CHIP_RV630) ||
		   ((rdev->family) == CHIP_RV635)) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if ((rdev->family) == CHIP_RV670) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
					    NUM_VS_STACK_ENTRIES(64));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
					    NUM_ES_STACK_ENTRIES(64));

	WREG32(SQ_CONFIG, sq_config);
	WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
	WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);

	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780) ||
	    ((rdev->family) == CHIP_RS880)) {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));

	/* More default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
					 S1_X(0x4) | S1_Y(0xc)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
					 S1_X(0x2) | S1_Y(0x2) |
					 S2_X(0xa) | S2_Y(0x6) |
					 S3_X(0x6) | S3_Y(0xa)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
					     S1_X(0x4) | S1_Y(0xc) |
					     S2_X(0x1) | S2_Y(0x6) |
					     S3_X(0xa) | S3_Y(0xe)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
					     S5_X(0x0) | S5_Y(0x0) |
					     S6_X(0xb) | S6_Y(0x4) |
					     S7_X(0x7) | S7_Y(0x8)));

	WREG32(VGT_STRMOUT_EN, 0);
	tmp = rdev->config.r600.max_pipes * 16;
	switch (rdev->family) {
	WREG32(VGT_ES_PER_GS, 128);
	WREG32(VGT_GS_PER_ES, tmp);
	WREG32(VGT_GS_PER_VS, 2);
	WREG32(VGT_GS_VERTEX_REUSE, 16);

	/* more default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
	WREG32(VGT_STRMOUT_EN, 0);
	WREG32(PA_SC_MODE_CNTL, 0);
	WREG32(PA_SC_AA_CONFIG, 0);
	WREG32(PA_SC_LINE_STIPPLE, 0);
	WREG32(SPI_INPUT_Z, 0);
	WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
	WREG32(CB_COLOR7_FRAG, 0);

	/* Clear render buffer base addresses */
	WREG32(CB_COLOR0_BASE, 0);
	WREG32(CB_COLOR1_BASE, 0);
	WREG32(CB_COLOR2_BASE, 0);
	WREG32(CB_COLOR3_BASE, 0);
	WREG32(CB_COLOR4_BASE, 0);
	WREG32(CB_COLOR5_BASE, 0);
	WREG32(CB_COLOR6_BASE, 0);
	WREG32(CB_COLOR7_BASE, 0);
	WREG32(CB_COLOR7_FRAG, 0);

	switch (rdev->family) {
		tmp = TC_L2_SIZE(8);
		tmp = TC_L2_SIZE(4);
		tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
		tmp = TC_L2_SIZE(0);
	WREG32(TC_CNTL, tmp);

	tmp = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, tmp);

	tmp = RREG32(ARB_POP);
	tmp |= ENABLE_TC128;
	WREG32(ARB_POP, tmp);

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
	WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
/*
 * Indirect register accessors
 */
u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	r = RREG32(PCIE_PORT_DATA);

void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	WREG32(PCIE_PORT_DATA, (v));
	(void)RREG32(PCIE_PORT_DATA);
void r600_cp_stop(struct radeon_device *rdev)
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));

int r600_init_microcode(struct radeon_device *rdev)
	struct platform_device *pdev;
	const char *chip_name;
	const char *rlc_chip_name;
	size_t pfp_req_size, me_req_size, rlc_req_size;

	pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");

	switch (rdev->family) {
		rlc_chip_name = "R600";
		chip_name = "RV610";
		rlc_chip_name = "R600";
		chip_name = "RV630";
		rlc_chip_name = "R600";
		chip_name = "RV620";
		rlc_chip_name = "R600";
		chip_name = "RV635";
		rlc_chip_name = "R600";
		chip_name = "RV670";
		rlc_chip_name = "R600";
		chip_name = "RS780";
		rlc_chip_name = "R600";
		chip_name = "RV770";
		rlc_chip_name = "R700";
		chip_name = "RV730";
		rlc_chip_name = "R700";
		chip_name = "RV710";
		rlc_chip_name = "R700";
		chip_name = "CEDAR";
		rlc_chip_name = "CEDAR";
		chip_name = "REDWOOD";
		rlc_chip_name = "REDWOOD";
		chip_name = "JUNIPER";
		rlc_chip_name = "JUNIPER";
		chip_name = "CYPRESS";
		rlc_chip_name = "CYPRESS";

	if (rdev->family >= CHIP_CEDAR) {
		pfp_req_size = EVERGREEN_PFP_UCODE_SIZE * 4;
		me_req_size = EVERGREEN_PM4_UCODE_SIZE * 4;
		rlc_req_size = EVERGREEN_RLC_UCODE_SIZE * 4;
	} else if (rdev->family >= CHIP_RV770) {
		pfp_req_size = R700_PFP_UCODE_SIZE * 4;
		me_req_size = R700_PM4_UCODE_SIZE * 4;
		rlc_req_size = R700_RLC_UCODE_SIZE * 4;
		pfp_req_size = PFP_UCODE_SIZE * 4;
		me_req_size = PM4_UCODE_SIZE * 12;
		rlc_req_size = RLC_UCODE_SIZE * 4;

	DRM_INFO("Loading %s Microcode\n", chip_name);

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
	err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
	if (rdev->pfp_fw->size != pfp_req_size) {
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->pfp_fw->size, fw_name);

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
	err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
	if (rdev->me_fw->size != me_req_size) {
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->size, fw_name);

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
	err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
	if (rdev->rlc_fw->size != rlc_req_size) {
		       "r600_rlc: Bogus length %zu in firmware \"%s\"\n",
		       rdev->rlc_fw->size, fw_name);

	platform_device_unregister(pdev);
		       "r600_cp: Failed to load firmware \"%s\"\n",
		release_firmware(rdev->pfp_fw);
		rdev->pfp_fw = NULL;
		release_firmware(rdev->me_fw);
		release_firmware(rdev->rlc_fw);
		rdev->rlc_fw = NULL;
static int r600_cp_load_microcode(struct radeon_device *rdev)
	const __be32 *fw_data;

	if (!rdev->me_fw || !rdev->pfp_fw)
	WREG32(CP_RB_CNTL, RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));

	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	WREG32(GRBM_SOFT_RESET, 0);

	WREG32(CP_ME_RAM_WADDR, 0);

	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < PM4_UCODE_SIZE * 3; i++)
		WREG32(CP_ME_RAM_DATA,
		       be32_to_cpup(fw_data++));

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA,
		       be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
int r600_cp_start(struct radeon_device *rdev)
	r = radeon_ring_lock(rdev, 7);
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(rdev, 0x1);
	if (rdev->family >= CHIP_CEDAR) {
		radeon_ring_write(rdev, 0x0);
		radeon_ring_write(rdev, rdev->config.evergreen.max_hw_contexts - 1);
	} else if (rdev->family >= CHIP_RV770) {
		radeon_ring_write(rdev, 0x0);
		radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
		radeon_ring_write(rdev, 0x3);
		radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
	radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, 0);
	radeon_ring_unlock_commit(rdev);

	WREG32(R_0086D8_CP_ME_CNTL, cp_me);
int r600_cp_resume(struct radeon_device *rdev)
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	WREG32(GRBM_SOFT_RESET, 0);

	/* Set ring buffer size */
	rb_bufsz = drm_order(rdev->cp.ring_size / 8);
	tmp = RB_NO_UPDATE | (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
	tmp |= BUF_SWAP_32BIT;
	WREG32(CP_RB_CNTL, tmp);
	WREG32(CP_SEM_WAIT_TIMER, 0x4);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
	WREG32(CP_RB_RPTR_WR, 0);
	WREG32(CP_RB_WPTR, 0);
	WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF);
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr));
	WREG32(CP_RB_CNTL, tmp);

	WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));

	rdev->cp.rptr = RREG32(CP_RB_RPTR);
	rdev->cp.wptr = RREG32(CP_RB_WPTR);

	r600_cp_start(rdev);
	rdev->cp.ready = true;
	r = radeon_ring_test(rdev);
		rdev->cp.ready = false;

void r600_cp_commit(struct radeon_device *rdev)
	WREG32(CP_RB_WPTR, rdev->cp.wptr);
	(void)RREG32(CP_RB_WPTR);
void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
	/* Align ring size */
	rb_bufsz = drm_order(ring_size / 8);
	ring_size = (1 << (rb_bufsz + 1)) * 4;
	rdev->cp.ring_size = ring_size;
	rdev->cp.align_mask = 16 - 1;
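/*
 * Rough sizing example (assuming drm_order() returns the ceiling of
 * log2): the 1MB ring requested by r600_init() gives rb_bufsz =
 * drm_order(0x100000 / 8) = 17 and ring_size = (1 << 18) * 4 = 1MB,
 * while an odd request such as 96KB would be rounded up to 128KB.
 */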
void r600_cp_fini(struct radeon_device *rdev)
	radeon_ring_fini(rdev);

/*
 * GPU scratch register helper functions.
 */
void r600_scratch_init(struct radeon_device *rdev)
	rdev->scratch.num_reg = 7;
	for (i = 0; i < rdev->scratch.num_reg; i++) {
		rdev->scratch.free[i] = true;
		rdev->scratch.reg[i] = SCRATCH_REG0 + (i * 4);
int r600_ring_test(struct radeon_device *rdev)
	r = radeon_scratch_get(rdev, &scratch);
		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ring_lock(rdev, 3);
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		radeon_scratch_free(rdev, scratch);
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
	radeon_ring_write(rdev, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test succeeded in %d usecs\n", i);
		DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
	radeon_scratch_free(rdev, scratch);
void r600_wb_disable(struct radeon_device *rdev)
	WREG32(SCRATCH_UMSK, 0);
	if (rdev->wb.wb_obj) {
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0))
		radeon_bo_kunmap(rdev->wb.wb_obj);
		radeon_bo_unpin(rdev->wb.wb_obj);
		radeon_bo_unreserve(rdev->wb.wb_obj);

void r600_wb_fini(struct radeon_device *rdev)
	r600_wb_disable(rdev);
	if (rdev->wb.wb_obj) {
		radeon_bo_unref(&rdev->wb.wb_obj);
		rdev->wb.wb_obj = NULL;

int r600_wb_enable(struct radeon_device *rdev)
	if (rdev->wb.wb_obj == NULL) {
		r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0)) {
		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
				  &rdev->wb.gpu_addr);
			radeon_bo_unreserve(rdev->wb.wb_obj);
			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
		radeon_bo_unreserve(rdev->wb.wb_obj);
			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);

	WREG32(SCRATCH_ADDR, (rdev->wb.gpu_addr >> 8) & 0xFFFFFFFF);
	WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + 1024) & 0xFFFFFFFC);
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + 1024) & 0xFF);
	WREG32(SCRATCH_UMSK, 0xff);
void r600_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
	/* Also consider EVENT_WRITE_EOP; it handles the interrupts + timestamps + events */
	radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
	radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
	/* wait for 3D idle clean */
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
	/* Emit fence sequence & fire IRQ */
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
	radeon_ring_write(rdev, fence->seq);
	/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
	radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
	radeon_ring_write(rdev, RB_INT_STAT);
int r600_copy_blit(struct radeon_device *rdev,
		   uint64_t src_offset, uint64_t dst_offset,
		   unsigned num_pages, struct radeon_fence *fence)
	mutex_lock(&rdev->r600_blit.mutex);
	rdev->r600_blit.vb_ib = NULL;
	r = r600_blit_prepare_copy(rdev, num_pages * RADEON_GPU_PAGE_SIZE);
		if (rdev->r600_blit.vb_ib)
			radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
		mutex_unlock(&rdev->r600_blit.mutex);
	r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * RADEON_GPU_PAGE_SIZE);
	r600_blit_done_copy(rdev, fence);
	mutex_unlock(&rdev->r600_blit.mutex);

int r600_set_surface_reg(struct radeon_device *rdev, int reg,
			 uint32_t tiling_flags, uint32_t pitch,
			 uint32_t offset, uint32_t obj_size)
	/* FIXME: implement */

void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
	/* FIXME: implement */

bool r600_card_posted(struct radeon_device *rdev)
	/* first check CRTCs */
	reg = RREG32(D1CRTC_CONTROL) |
		RREG32(D2CRTC_CONTROL);

	/* then check MEM_SIZE, in case the crtcs are off */
	if (RREG32(CONFIG_MEMSIZE))
int r600_startup(struct radeon_device *rdev)
	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
		r = r600_init_microcode(rdev);
			DRM_ERROR("Failed to load firmware!\n");

	r600_mc_program(rdev);
	if (rdev->flags & RADEON_IS_AGP) {
		r600_agp_enable(rdev);
		r = r600_pcie_gart_enable(rdev);
	r600_gpu_init(rdev);
	r = r600_blit_init(rdev);
		r600_blit_fini(rdev);
		rdev->asic->copy = NULL;
		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
	/* pin copy shader into vram */
	if (rdev->r600_blit.shader_obj) {
		r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
		if (unlikely(r != 0))
		r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
				  &rdev->r600_blit.shader_gpu_addr);
		radeon_bo_unreserve(rdev->r600_blit.shader_obj);
			dev_err(rdev->dev, "(%d) pin blit object failed\n", r);

	r = r600_irq_init(rdev);
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);

	r = radeon_ring_init(rdev, rdev->cp.ring_size);
	r = r600_cp_load_microcode(rdev);
	r = r600_cp_resume(rdev);
	/* the write-back buffer is not vital, so don't worry about failure */
	r600_wb_enable(rdev);
void r600_vga_set_state(struct radeon_device *rdev, bool state)
	temp = RREG32(CONFIG_CNTL);
	if (state == false) {
	WREG32(CONFIG_CNTL, temp);

int r600_resume(struct radeon_device *rdev)
	/* Do not reset the GPU before posting; on r600 hw, unlike r500 hw,
	 * posting performs the tasks needed to bring the GPU back into a
	 * good state.
	 */
	atom_asic_init(rdev->mode_info.atom_context);
	/* Initialize clocks */
	r = radeon_clocks_init(rdev);

	r = r600_startup(rdev);
		DRM_ERROR("r600 startup failed on resume\n");

	r = r600_ib_test(rdev);
		DRM_ERROR("radeon: failed testing IB (%d).\n", r);

	r = r600_audio_init(rdev);
		DRM_ERROR("radeon: audio resume failed\n");

int r600_suspend(struct radeon_device *rdev)
	r600_audio_fini(rdev);
	/* FIXME: we should wait for ring to be empty */
	rdev->cp.ready = false;
	r600_irq_suspend(rdev);
	r600_wb_disable(rdev);
	r600_pcie_gart_disable(rdev);
	/* unpin shader bo */
	if (rdev->r600_blit.shader_obj) {
		r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
		radeon_bo_unpin(rdev->r600_blit.shader_obj);
		radeon_bo_unreserve(rdev->r600_blit.shader_obj);
/* The plan is to move initialization into this function and use
 * helper functions so that radeon_device_init does pretty much
 * nothing more than call ASIC-specific functions. This should
 * also allow us to remove a bunch of callback functions.
 */
int r600_init(struct radeon_device *rdev)
	r = radeon_dummy_page_init(rdev);
	if (r600_debugfs_mc_info_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for mc!\n");
	/* This doesn't do much */
	r = radeon_gem_init(rdev);
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
	r = radeon_atombios_init(rdev);
	/* Post card if necessary */
	if (!r600_card_posted(rdev)) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	r = radeon_clocks_init(rdev);
	/* Initialize power management */
	radeon_pm_init(rdev);

	r = radeon_fence_driver_init(rdev);
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
			radeon_agp_disable(rdev);
	r = r600_mc_init(rdev);
	/* Memory manager */
	r = radeon_bo_init(rdev);

	r = radeon_irq_kms_init(rdev);

	rdev->cp.ring_obj = NULL;
	r600_ring_init(rdev, 1024 * 1024);

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);

	rdev->accel_working = true;
	r = r600_startup(rdev);
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		r600_irq_fini(rdev);
		radeon_irq_kms_fini(rdev);
		r600_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	if (rdev->accel_working) {
		r = radeon_ib_pool_init(rdev);
			dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
			rdev->accel_working = false;
		r = r600_ib_test(rdev);
			dev_err(rdev->dev, "IB test failed (%d).\n", r);
			rdev->accel_working = false;

	r = r600_audio_init(rdev);
		return r; /* TODO error handling */
void r600_fini(struct radeon_device *rdev)
	radeon_pm_fini(rdev);
	r600_audio_fini(rdev);
	r600_blit_fini(rdev);
	r600_irq_fini(rdev);
	radeon_irq_kms_fini(rdev);
	r600_pcie_gart_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_clocks_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
	radeon_dummy_page_fini(rdev);
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
	/* FIXME: implement */
	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(rdev, ib->gpu_addr & 0xFFFFFFFC);
	radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
	radeon_ring_write(rdev, ib->length_dw);

int r600_ib_test(struct radeon_device *rdev)
	struct radeon_ib *ib;

	r = radeon_scratch_get(rdev, &scratch);
		DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ib_get(rdev, &ib);
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
	ib->ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
	ib->ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	ib->ptr[2] = 0xDEADBEEF;
	ib->ptr[3] = PACKET2(0);
	ib->ptr[4] = PACKET2(0);
	ib->ptr[5] = PACKET2(0);
	ib->ptr[6] = PACKET2(0);
	ib->ptr[7] = PACKET2(0);
	ib->ptr[8] = PACKET2(0);
	ib->ptr[9] = PACKET2(0);
	ib->ptr[10] = PACKET2(0);
	ib->ptr[11] = PACKET2(0);
	ib->ptr[12] = PACKET2(0);
	ib->ptr[13] = PACKET2(0);
	ib->ptr[14] = PACKET2(0);
	ib->ptr[15] = PACKET2(0);

	r = radeon_ib_schedule(rdev, ib);
		radeon_scratch_free(rdev, scratch);
		radeon_ib_free(rdev, &ib);
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
	r = radeon_fence_wait(ib->fence, false);
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
	if (i < rdev->usec_timeout) {
		DRM_INFO("ib test succeeded in %u usecs\n", i);
		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
	radeon_scratch_free(rdev, scratch);
	radeon_ib_free(rdev, &ib);
/*
 * Interrupts use a ring buffer on r6xx/r7xx hardware.  It works pretty much
 * the same as the CP ring buffer, but in reverse.  Rather than the CPU
 * writing to the ring and the GPU consuming, the GPU writes to the ring
 * and the host consumes.  As the host irq handler processes interrupts, it
 * increments the rptr.  When the rptr catches up with the wptr, all the
 * current interrupts have been processed.
 */
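/*
 * A minimal sketch (not part of the driver) of the consumption model
 * described above.  It reuses the ih fields that r600_irq_process below
 * operates on; the helper name and the wptr parameter are hypothetical,
 * added only for illustration, so the block is compiled out.
 */
#if 0
static void ih_consume_sketch(struct radeon_device *rdev, u32 wptr)
{
	u32 rptr = rdev->ih.rptr;

	/* the GPU produces entries at wptr; the host consumes at rptr */
	while (rptr != wptr) {
		u32 src_id = rdev->ih.ring[rptr / 4] & 0xff;	/* first dword of the vector */

		(void)src_id;				/* real code dispatches on src_id here */
		rptr = (rptr + 16) & rdev->ih.ptr_mask;	/* one 16-byte entry consumed */
	}
	rdev->ih.rptr = rptr;		/* rptr caught up with wptr: all processed */
	WREG32(IH_RB_RPTR, rptr);	/* publish the new rptr to the hardware */
}
#endif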
void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
	u32 rb_bufsz;

	/* Align ring size */
	rb_bufsz = drm_order(ring_size / 4);
	ring_size = (1 << rb_bufsz) * 4;
	rdev->ih.ring_size = ring_size;
	rdev->ih.ptr_mask = rdev->ih.ring_size - 1;
}
static int r600_ih_ring_alloc(struct radeon_device *rdev)
{
	int r;

	/* Allocate ring buffer */
	if (rdev->ih.ring_obj == NULL) {
		r = radeon_bo_create(rdev, NULL, rdev->ih.ring_size,
				     true,
				     RADEON_GEM_DOMAIN_GTT,
				     &rdev->ih.ring_obj);
		if (r) {
			DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(rdev->ih.ring_obj,
				  RADEON_GEM_DOMAIN_GTT,
				  &rdev->ih.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->ih.ring_obj);
			DRM_ERROR("radeon: failed to pin ih ring buffer (%d).\n", r);
			return r;
		}
		r = radeon_bo_kmap(rdev->ih.ring_obj,
				   (void **)&rdev->ih.ring);
		radeon_bo_unreserve(rdev->ih.ring_obj);
		if (r) {
			DRM_ERROR("radeon: failed to map ih ring buffer (%d).\n", r);
			return r;
		}
	}
	return 0;
}
static void r600_ih_ring_fini(struct radeon_device *rdev)
{
	int r;

	if (rdev->ih.ring_obj) {
		r = radeon_bo_reserve(rdev->ih.ring_obj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(rdev->ih.ring_obj);
			radeon_bo_unpin(rdev->ih.ring_obj);
			radeon_bo_unreserve(rdev->ih.ring_obj);
		}
		radeon_bo_unref(&rdev->ih.ring_obj);
		rdev->ih.ring = NULL;
		rdev->ih.ring_obj = NULL;
	}
}
void r600_rlc_stop(struct radeon_device *rdev)
{
	if ((rdev->family >= CHIP_RV770) &&
	    (rdev->family <= CHIP_RV740)) {
		/* r7xx asics need to soft reset RLC before halting */
		WREG32(SRBM_SOFT_RESET, SOFT_RESET_RLC);
		RREG32(SRBM_SOFT_RESET);
		WREG32(SRBM_SOFT_RESET, 0);
		RREG32(SRBM_SOFT_RESET);
	}

	WREG32(RLC_CNTL, 0);
}

static void r600_rlc_start(struct radeon_device *rdev)
{
	WREG32(RLC_CNTL, RLC_ENABLE);
}
static int r600_rlc_init(struct radeon_device *rdev)
{
	u32 i;
	const __be32 *fw_data;

	r600_rlc_stop(rdev);

	WREG32(RLC_HB_BASE, 0);
	WREG32(RLC_HB_CNTL, 0);
	WREG32(RLC_HB_RPTR, 0);
	WREG32(RLC_HB_WPTR, 0);
	WREG32(RLC_HB_WPTR_LSB_ADDR, 0);
	WREG32(RLC_HB_WPTR_MSB_ADDR, 0);
	WREG32(RLC_MC_CNTL, 0);
	WREG32(RLC_UCODE_CNTL, 0);

	fw_data = (const __be32 *)rdev->rlc_fw->data;
	if (rdev->family >= CHIP_CEDAR) {
		for (i = 0; i < EVERGREEN_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	} else if (rdev->family >= CHIP_RV770) {
		for (i = 0; i < R700_RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	} else {
		for (i = 0; i < RLC_UCODE_SIZE; i++) {
			WREG32(RLC_UCODE_ADDR, i);
			WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
		}
	}
	WREG32(RLC_UCODE_ADDR, 0);

	r600_rlc_start(rdev);

	return 0;
}
static void r600_enable_interrupts(struct radeon_device *rdev)
{
	u32 ih_cntl = RREG32(IH_CNTL);
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);

	ih_cntl |= ENABLE_INTR;
	ih_rb_cntl |= IH_RB_ENABLE;
	WREG32(IH_CNTL, ih_cntl);
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	rdev->ih.enabled = true;
}

void r600_disable_interrupts(struct radeon_device *rdev)
{
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
	u32 ih_cntl = RREG32(IH_CNTL);

	ih_rb_cntl &= ~IH_RB_ENABLE;
	ih_cntl &= ~ENABLE_INTR;
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	WREG32(IH_CNTL, ih_cntl);
	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);
	rdev->ih.enabled = false;
}
static void r600_disable_interrupt_state(struct radeon_device *rdev)
{
	u32 tmp;

	WREG32(CP_INT_CNTL, 0);
	WREG32(GRBM_INT_CNTL, 0);
	WREG32(DxMODE_INT_MASK, 0);
	if (ASIC_IS_DCE3(rdev)) {
		WREG32(DCE3_DACA_AUTODETECT_INT_CONTROL, 0);
		WREG32(DCE3_DACB_AUTODETECT_INT_CONTROL, 0);
		tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
		tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
		if (ASIC_IS_DCE32(rdev)) {
			tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
			tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
		}
	} else {
		WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
		WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
		tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
	}
}
int r600_irq_init(struct radeon_device *rdev)
{
	int ret = 0;
	int rb_bufsz;
	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;

	/* allocate ring */
	ret = r600_ih_ring_alloc(rdev);
	if (ret)
		return ret;

	/* disable irqs */
	r600_disable_interrupts(rdev);

	/* init rlc */
	ret = r600_rlc_init(rdev);
	if (ret) {
		r600_ih_ring_fini(rdev);
		return ret;
	}

	/* setup interrupt control */
	/* set dummy read address to ring address */
	WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
	interrupt_cntl = RREG32(INTERRUPT_CNTL);
	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
	/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
	WREG32(INTERRUPT_CNTL, interrupt_cntl);

	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
	rb_bufsz = drm_order(rdev->ih.ring_size / 4);

	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
		      IH_WPTR_OVERFLOW_CLEAR |
		      (rb_bufsz << 1));
	/* WPTR writeback, not yet */
	/*ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;*/
	WREG32(IH_RB_WPTR_ADDR_LO, 0);
	WREG32(IH_RB_WPTR_ADDR_HI, 0);

	WREG32(IH_RB_CNTL, ih_rb_cntl);

	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);

	/* Default settings for IH_CNTL (disabled at first) */
	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10);
	/* RPTR_REARM only works if msi's are enabled */
	if (rdev->msi_enabled)
		ih_cntl |= RPTR_REARM;

#ifdef __BIG_ENDIAN
	ih_cntl |= IH_MC_SWAP(IH_MC_SWAP_32BIT);
#endif
	WREG32(IH_CNTL, ih_cntl);

	/* force the active interrupt state to all disabled */
	if (rdev->family >= CHIP_CEDAR)
		evergreen_disable_interrupt_state(rdev);
	else
		r600_disable_interrupt_state(rdev);

	/* enable irqs */
	r600_enable_interrupts(rdev);

	return ret;
}
void r600_irq_suspend(struct radeon_device *rdev)
{
	r600_irq_disable(rdev);
	r600_rlc_stop(rdev);
}

void r600_irq_fini(struct radeon_device *rdev)
{
	r600_irq_suspend(rdev);
	r600_ih_ring_fini(rdev);
}
int r600_irq_set(struct radeon_device *rdev)
{
	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
	u32 mode_int = 0;
	u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
	u32 grbm_int_cntl = 0;
	u32 hdmi1, hdmi2;

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
		return -EINVAL;
	}
	/* don't enable anything if the ih is disabled */
	if (!rdev->ih.enabled) {
		r600_disable_interrupts(rdev);
		/* force the active interrupt state to all disabled */
		r600_disable_interrupt_state(rdev);
		return 0;
	}

	hdmi1 = RREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
	if (ASIC_IS_DCE3(rdev)) {
		hdmi2 = RREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
		hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
		if (ASIC_IS_DCE32(rdev)) {
			hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
			hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;
		}
	} else {
		hdmi2 = RREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL) & ~R600_HDMI_INT_EN;
		hpd1 = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd2 = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & ~DC_HPDx_INT_EN;
		hpd3 = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & ~DC_HPDx_INT_EN;
	}

	if (rdev->irq.sw_int) {
		DRM_DEBUG("r600_irq_set: sw int\n");
		cp_int_cntl |= RB_INT_ENABLE;
	}
	if (rdev->irq.crtc_vblank_int[0]) {
		DRM_DEBUG("r600_irq_set: vblank 0\n");
		mode_int |= D1MODE_VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[1]) {
		DRM_DEBUG("r600_irq_set: vblank 1\n");
		mode_int |= D2MODE_VBLANK_INT_MASK;
	}
	if (rdev->irq.hpd[0]) {
		DRM_DEBUG("r600_irq_set: hpd 1\n");
		hpd1 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[1]) {
		DRM_DEBUG("r600_irq_set: hpd 2\n");
		hpd2 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[2]) {
		DRM_DEBUG("r600_irq_set: hpd 3\n");
		hpd3 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[3]) {
		DRM_DEBUG("r600_irq_set: hpd 4\n");
		hpd4 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[4]) {
		DRM_DEBUG("r600_irq_set: hpd 5\n");
		hpd5 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[5]) {
		DRM_DEBUG("r600_irq_set: hpd 6\n");
		hpd6 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hdmi[0]) {
		DRM_DEBUG("r600_irq_set: hdmi 1\n");
		hdmi1 |= R600_HDMI_INT_EN;
	}
	if (rdev->irq.hdmi[1]) {
		DRM_DEBUG("r600_irq_set: hdmi 2\n");
		hdmi2 |= R600_HDMI_INT_EN;
	}
	if (rdev->irq.gui_idle) {
		DRM_DEBUG("gui idle\n");
		grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
	}

	WREG32(CP_INT_CNTL, cp_int_cntl);
	WREG32(DxMODE_INT_MASK, mode_int);
	WREG32(GRBM_INT_CNTL, grbm_int_cntl);
	WREG32(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, hdmi1);
	if (ASIC_IS_DCE3(rdev)) {
		WREG32(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, hdmi2);
		WREG32(DC_HPD1_INT_CONTROL, hpd1);
		WREG32(DC_HPD2_INT_CONTROL, hpd2);
		WREG32(DC_HPD3_INT_CONTROL, hpd3);
		WREG32(DC_HPD4_INT_CONTROL, hpd4);
		if (ASIC_IS_DCE32(rdev)) {
			WREG32(DC_HPD5_INT_CONTROL, hpd5);
			WREG32(DC_HPD6_INT_CONTROL, hpd6);
		}
	} else {
		WREG32(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, hdmi2);
		WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
		WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
		WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, hpd3);
	}

	return 0;
}
static inline void r600_irq_ack(struct radeon_device *rdev,
				u32 *disp_int,
				u32 *disp_int_cont,
				u32 *disp_int_cont2)
{
	u32 tmp;

	if (ASIC_IS_DCE3(rdev)) {
		*disp_int = RREG32(DCE3_DISP_INTERRUPT_STATUS);
		*disp_int_cont = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE);
		*disp_int_cont2 = RREG32(DCE3_DISP_INTERRUPT_STATUS_CONTINUE2);
	} else {
		*disp_int = RREG32(DISP_INTERRUPT_STATUS);
		*disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
		*disp_int_cont2 = 0;
	}

	if (*disp_int & LB_D1_VBLANK_INTERRUPT)
		WREG32(D1MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
	if (*disp_int & LB_D1_VLINE_INTERRUPT)
		WREG32(D1MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
	if (*disp_int & LB_D2_VBLANK_INTERRUPT)
		WREG32(D2MODE_VBLANK_STATUS, DxMODE_VBLANK_ACK);
	if (*disp_int & LB_D2_VLINE_INTERRUPT)
		WREG32(D2MODE_VLINE_STATUS, DxMODE_VLINE_ACK);
	if (*disp_int & DC_HPD1_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD1_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD1_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
		}
	}
	if (*disp_int & DC_HPD2_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD2_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD2_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
		}
	}
	if (*disp_int_cont & DC_HPD3_INTERRUPT) {
		if (ASIC_IS_DCE3(rdev)) {
			tmp = RREG32(DC_HPD3_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD3_INT_CONTROL, tmp);
		} else {
			tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
		}
	}
	if (*disp_int_cont & DC_HPD4_INTERRUPT) {
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
	}
	if (ASIC_IS_DCE32(rdev)) {
		if (*disp_int_cont2 & DC_HPD5_INTERRUPT) {
			tmp = RREG32(DC_HPD5_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD5_INT_CONTROL, tmp);
		}
		if (*disp_int_cont2 & DC_HPD6_INTERRUPT) {
			tmp = RREG32(DC_HPD6_INT_CONTROL);
			tmp |= DC_HPDx_INT_ACK;
			WREG32(DC_HPD6_INT_CONTROL, tmp);
		}
	}
	if (RREG32(R600_HDMI_BLOCK1 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
		WREG32_P(R600_HDMI_BLOCK1 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
	}
	if (ASIC_IS_DCE3(rdev)) {
		if (RREG32(R600_HDMI_BLOCK3 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
			WREG32_P(R600_HDMI_BLOCK3 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
		}
	} else {
		if (RREG32(R600_HDMI_BLOCK2 + R600_HDMI_STATUS) & R600_HDMI_INT_PENDING) {
			WREG32_P(R600_HDMI_BLOCK2 + R600_HDMI_CNTL, R600_HDMI_INT_ACK, ~R600_HDMI_INT_ACK);
		}
	}
}
void r600_irq_disable(struct radeon_device *rdev)
{
	u32 disp_int, disp_int_cont, disp_int_cont2;

	r600_disable_interrupts(rdev);
	/* Wait and acknowledge irq */
	mdelay(1);
	r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);
	r600_disable_interrupt_state(rdev);
}
static inline u32 r600_get_ih_wptr(struct radeon_device *rdev)
{
	u32 wptr, tmp;

	/* XXX use writeback */
	wptr = RREG32(IH_RB_WPTR);

	if (wptr & RB_OVERFLOW) {
		/* When a ring buffer overflow happens, start parsing interrupts
		 * from the last not overwritten vector (wptr + 16).  Hopefully
		 * this should allow us to catch up.
		 */
		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
			 wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
		tmp = RREG32(IH_RB_CNTL);
		tmp |= IH_WPTR_OVERFLOW_CLEAR;
		WREG32(IH_RB_CNTL, tmp);
	}
	return (wptr & rdev->ih.ptr_mask);
}
/*
 * Each IV ring entry is 128 bits:
 * [7:0]    - interrupt source id
 * [59:32]  - interrupt source data
 * [127:60] - reserved
 *
 * The basic interrupt vector entries
 * are decoded as follows:
 * src_id  src_data  description
 *     19         0  FP Hot plug detection A
 *     19         1  FP Hot plug detection B
 *     19         2  DAC A auto-detection
 *     19         3  DAC B auto-detection
 *    181         -  EOP Interrupt
 *
 * Note, these are based on r600 and may need to be
 * adjusted or added to on newer asics
 */
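/*
 * A minimal sketch (not part of the driver) of decoding one IV ring entry
 * according to the layout above.  The ring is read as 32-bit dwords, so a
 * 128-bit entry spans four consecutive dwords and only the first two carry
 * defined fields.  The helper name and parameters are assumptions for
 * illustration only, so the block is compiled out.
 */
#if 0
static void iv_entry_decode_sketch(const u32 *ring, u32 rptr_bytes)
{
	u32 ring_index = rptr_bytes / 4;		 /* byte offset -> dword index */
	u32 src_id   = ring[ring_index] & 0xff;		 /* [7:0]   interrupt source id */
	u32 src_data = ring[ring_index + 1] & 0xfffffff; /* [59:32] interrupt source data */

	(void)src_id;	/* real code switches on src_id, then src_data */
	(void)src_data;
}
#endif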
int r600_irq_process(struct radeon_device *rdev)
{
	u32 wptr = r600_get_ih_wptr(rdev);
	u32 rptr = rdev->ih.rptr;
	u32 src_id, src_data;
	u32 ring_index, disp_int, disp_int_cont, disp_int_cont2;
	unsigned long flags;
	bool queue_hotplug = false;

	DRM_DEBUG("r600_irq_process start: rptr %d, wptr %d\n", rptr, wptr);
	if (!rdev->ih.enabled)
		return IRQ_NONE;

	spin_lock_irqsave(&rdev->ih.lock, flags);

	if (rptr == wptr) {
		spin_unlock_irqrestore(&rdev->ih.lock, flags);
		return IRQ_NONE;
	}
	if (rdev->shutdown) {
		spin_unlock_irqrestore(&rdev->ih.lock, flags);
		return IRQ_NONE;
	}

restart_ih:
	/* display interrupts */
	r600_irq_ack(rdev, &disp_int, &disp_int_cont, &disp_int_cont2);

	rdev->ih.wptr = wptr;
	while (rptr != wptr) {
		/* wptr/rptr are in bytes! */
		ring_index = rptr / 4;
		src_id = rdev->ih.ring[ring_index] & 0xff;
		src_data = rdev->ih.ring[ring_index + 1] & 0xfffffff;

		switch (src_id) {
		case 1: /* D1 vblank/vline */
			switch (src_data) {
			case 0: /* D1 vblank */
				if (disp_int & LB_D1_VBLANK_INTERRUPT) {
					drm_handle_vblank(rdev->ddev, 0);
					rdev->pm.vblank_sync = true;
					wake_up(&rdev->irq.vblank_queue);
					disp_int &= ~LB_D1_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D1 vblank\n");
				}
				break;
			case 1: /* D1 vline */
				if (disp_int & LB_D1_VLINE_INTERRUPT) {
					disp_int &= ~LB_D1_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D1 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 5: /* D2 vblank/vline */
			switch (src_data) {
			case 0: /* D2 vblank */
				if (disp_int & LB_D2_VBLANK_INTERRUPT) {
					drm_handle_vblank(rdev->ddev, 1);
					rdev->pm.vblank_sync = true;
					wake_up(&rdev->irq.vblank_queue);
					disp_int &= ~LB_D2_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D2 vblank\n");
				}
				break;
			case 1: /* D2 vline */
				if (disp_int & LB_D2_VLINE_INTERRUPT) {
					disp_int &= ~LB_D2_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D2 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 19: /* HPD/DAC hotplug */
			switch (src_data) {
			case 0:
				if (disp_int & DC_HPD1_INTERRUPT) {
					disp_int &= ~DC_HPD1_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD1\n");
				}
				break;
			case 1:
				if (disp_int & DC_HPD2_INTERRUPT) {
					disp_int &= ~DC_HPD2_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD2\n");
				}
				break;
			case 4:
				if (disp_int_cont & DC_HPD3_INTERRUPT) {
					disp_int_cont &= ~DC_HPD3_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD3\n");
				}
				break;
			case 5:
				if (disp_int_cont & DC_HPD4_INTERRUPT) {
					disp_int_cont &= ~DC_HPD4_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD4\n");
				}
				break;
			case 10:
				if (disp_int_cont2 & DC_HPD5_INTERRUPT) {
					disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD5\n");
				}
				break;
			case 12:
				if (disp_int_cont2 & DC_HPD6_INTERRUPT) {
					disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD6\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 21: /* HDMI */
			DRM_DEBUG("IH: HDMI: 0x%x\n", src_data);
			r600_audio_schedule_polling(rdev);
			break;
		case 176: /* CP_INT in ring buffer */
		case 177: /* CP_INT in IB1 */
		case 178: /* CP_INT in IB2 */
			DRM_DEBUG("IH: CP int: 0x%08x\n", src_data);
			radeon_fence_process(rdev);
			break;
		case 181: /* CP EOP event */
			DRM_DEBUG("IH: CP EOP\n");
			break;
		case 233: /* GUI IDLE */
			DRM_DEBUG("IH: GUI idle\n");
			rdev->pm.gui_idle = true;
			wake_up(&rdev->irq.idle_queue);
			break;
		default:
			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
			break;
		}

		/* wptr/rptr are in bytes! */
		rptr += 16;
		rptr &= rdev->ih.ptr_mask;
	}
	/* make sure wptr hasn't changed while processing */
	wptr = r600_get_ih_wptr(rdev);
	if (wptr != rdev->ih.wptr)
		goto restart_ih;
	if (queue_hotplug)
		queue_work(rdev->wq, &rdev->hotplug_work);
	rdev->ih.rptr = rptr;
	WREG32(IH_RB_RPTR, rdev->ih.rptr);
	spin_unlock_irqrestore(&rdev->ih.lock, flags);
	return IRQ_HANDLED;
}
#if defined(CONFIG_DEBUG_FS)

static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	unsigned count, i, j;

	radeon_ring_free_size(rdev);
	count = (rdev->cp.ring_size / 4) - rdev->cp.ring_free_dw;
	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
	seq_printf(m, "CP_RB_WPTR 0x%08x\n", RREG32(CP_RB_WPTR));
	seq_printf(m, "CP_RB_RPTR 0x%08x\n", RREG32(CP_RB_RPTR));
	seq_printf(m, "driver's copy of the CP_RB_WPTR 0x%08x\n", rdev->cp.wptr);
	seq_printf(m, "driver's copy of the CP_RB_RPTR 0x%08x\n", rdev->cp.rptr);
	seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
	seq_printf(m, "%u dwords in ring\n", count);
	i = rdev->cp.rptr;
	for (j = 0; j <= count; j++) {
		seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
		i = (i + 1) & rdev->cp.ptr_mask;
	}
	return 0;
}
static int r600_debugfs_mc_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
	DREG32_SYS(m, rdev, VM_L2_STATUS);
	return 0;
}

static struct drm_info_list r600_mc_info_list[] = {
	{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
	{"r600_ring_info", r600_debugfs_cp_ring_info, 0, NULL},
};
#endif

int r600_debugfs_mc_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
#else
	return 0;
#endif
}
/**
 * r600_ioctl_wait_idle - flush host path cache on wait idle ioctl
 * rdev: radeon device structure
 * bo: buffer object struct which userspace is waiting for idle
 *
 * Some R6XX/R7XX hardware doesn't seem to take into account an HDP flush
 * performed through the ring buffer; this leads to corruption in rendering,
 * see http://bugzilla.kernel.org/show_bug.cgi?id=15186.  To avoid this we
 * directly perform the HDP flush by writing the register through MMIO.
 */
void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
{
	WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
}
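/*
 * A minimal usage sketch (not part of the driver): a wait-idle ioctl path
 * could flush the host path cache via the MMIO write above once the buffer
 * object has gone idle.  The caller shown here is hypothetical and exists
 * only to illustrate where r600_ioctl_wait_idle fits, so it is compiled out.
 */
#if 0
static void wait_idle_then_flush_sketch(struct radeon_device *rdev,
					struct radeon_bo *bo)
{
	/* ... wait for the buffer object to go idle ... */

	/* flush the host path cache directly via MMIO (see above) */
	r600_ioctl_wait_idle(rdev, bo);
}
#endif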