/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/seq_file.h>
#include "drmP.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_drm.h"
#include "radeon_share.h"
#include "r100_track.h"
#include "r300_reg_safe.h"
/* r300,r350,rv350,rv370,rv380 depend on: */
void r100_hdp_reset(struct radeon_device *rdev);
int r100_cp_reset(struct radeon_device *rdev);
int r100_rb2d_reset(struct radeon_device *rdev);
int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
int r100_pci_gart_enable(struct radeon_device *rdev);
void r100_pci_gart_disable(struct radeon_device *rdev);
void r100_mc_setup(struct radeon_device *rdev);
void r100_mc_disable_clients(struct radeon_device *rdev);
int r100_gui_wait_for_idle(struct radeon_device *rdev);
int r100_cs_packet_parse(struct radeon_cs_parser *p,
			 struct radeon_cs_packet *pkt,
			 unsigned idx);
int r100_cs_packet_parse_vline(struct radeon_cs_parser *p);
int r100_cs_parse_packet0(struct radeon_cs_parser *p,
			  struct radeon_cs_packet *pkt,
			  const unsigned *auth, unsigned n,
			  radeon_packet0_check_t check);
int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
					 struct radeon_cs_packet *pkt,
					 struct radeon_object *robj);
/* This file gathers functions specific to:
 * r300,r350,rv350,rv370,rv380
 *
 * Some of these functions might be used by newer ASICs.
 */
void r300_gpu_init(struct radeon_device *rdev);
int r300_mc_wait_for_idle(struct radeon_device *rdev);
int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev);

/*
 * rv370,rv380 PCIE GART
 */
void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	uint32_t tmp;
	int i;

	/* Work around a HW bug: flush twice */
	for (i = 0; i < 2; i++) {
		tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
		WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL,
			    tmp | RADEON_PCIE_TX_GART_INVALIDATE_TLB);
		(void)RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
		WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
	}
}
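
/*
 * The PCIE GART aperture is bounded by the START_LO/END_LO registers
 * programmed below; END appears to be inclusive, which is why the last
 * valid address is gtt_location + gtt_size - 4096 (the final page).
 * Accesses outside that range are discarded and redirected to the
 * DISCARD_RD_ADDR page.  (This is a reading of the register usage in
 * the code below, not taken from hardware documentation.)
 */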
int rv370_pcie_gart_enable(struct radeon_device *rdev)
{
	uint32_t table_addr;
	uint32_t tmp;
	int r;

	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r) {
		return r;
	}
	r = rv370_debugfs_pcie_gart_info_init(rdev);
	if (r) {
		DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
	}
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
	r = radeon_gart_table_vram_alloc(rdev);
	if (r) {
		return r;
	}
	/* discard memory requests outside of the configured range */
	tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
	WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_location);
	tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 4096;
	WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
	WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
	WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
	table_addr = rdev->gart.table_addr;
	WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr);
	/* FIXME: setup default page */
	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_location);
	WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
	WREG32_PCIE(0x18, 0);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
	tmp |= RADEON_PCIE_TX_GART_EN;
	tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
	rv370_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%08X).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20), table_addr);
	rdev->gart.ready = true;
	return 0;
}
void rv370_pcie_gart_disable(struct radeon_device *rdev)
{
	uint32_t tmp;

	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
	tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
	if (rdev->gart.table.vram.robj) {
		radeon_object_kunmap(rdev->gart.table.vram.robj);
		radeon_object_unpin(rdev->gart.table.vram.robj);
	}
}
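
/*
 * A GART page-table entry is one 32-bit word per GPU page.  The 40-bit
 * bus address is packed by dropping the low 8 bits and storing address
 * bits 39:32 in the top byte of the word, leaving the low bits free for
 * flag bits.  Illustrative example (not from the original source):
 * addr = 0x0123456000 gives
 * (0x23456000 >> 8) | (0x01 << 24) = 0x01234560 before the flag bits
 * are OR'ed in.
 */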
int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
	void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;

	if (i < 0 || i >= rdev->gart.num_gpu_pages) {
		return -EINVAL;
	}
	addr = (lower_32_bits(addr) >> 8) |
	       ((upper_32_bits(addr) & 0xff) << 24) |
	       0xc;
	/* on x86 we want this to be CPU endian; on powerpc without HW
	 * swappers it'll get swapped on the way into VRAM, so no need
	 * for cpu_to_le32 on VRAM tables */
	writel(addr, ((void __iomem *)ptr) + (i * 4));
	return 0;
}
int r300_gart_enable(struct radeon_device *rdev)
{
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->family > CHIP_RV350) {
			rv370_pcie_gart_disable(rdev);
		} else {
			r100_pci_gart_disable(rdev);
		}
		return 0;
	}
	if (rdev->flags & RADEON_IS_PCIE) {
		rdev->asic->gart_disable = &rv370_pcie_gart_disable;
		rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
		rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
		return rv370_pcie_gart_enable(rdev);
	}
	if (rdev->flags & RADEON_IS_PCI) {
		rdev->asic->gart_disable = &r100_pci_gart_disable;
		rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
		rdev->asic->gart_set_page = &r100_pci_gart_set_page;
		return r100_pci_gart_enable(rdev);
	}
	return r100_pci_gart_enable(rdev);
}
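
/*
 * In r300_mc_init() below, vram_location and gtt_location are first set
 * to 0xFFFFFFFF as "unset" markers; radeon_mc_setup() then appears to
 * pick actual placements for any range still marked that way, while on
 * AGP boards the GTT is pinned at the AGP aperture base instead.
 * (Interpretation based on this file; radeon_mc_setup() lives
 * elsewhere.)
 */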
int r300_mc_init(struct radeon_device *rdev)
{
	int r;

	if (r100_debugfs_rbbm_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
	}

	r300_gpu_init(rdev);
	r100_pci_gart_disable(rdev);
	if (rdev->flags & RADEON_IS_PCIE) {
		rv370_pcie_gart_disable(rdev);
	}

	/* Setup GPU memory space */
	rdev->mc.vram_location = 0xFFFFFFFFUL;
	rdev->mc.gtt_location = 0xFFFFFFFFUL;
	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r) {
			printk(KERN_WARNING "[drm] Disabling AGP\n");
			rdev->flags &= ~RADEON_IS_AGP;
			rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
		} else {
			rdev->mc.gtt_location = rdev->mc.agp_base;
		}
	}
	r = radeon_mc_setup(rdev);
	if (r) {
		return r;
	}

	/* Program GPU memory space */
	r100_mc_disable_clients(rdev);
	if (r300_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait for MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}
	r100_mc_setup(rdev);
	return 0;
}
void r300_mc_fini(struct radeon_device *rdev)
{
	if (rdev->flags & RADEON_IS_PCIE) {
		rv370_pcie_gart_disable(rdev);
		radeon_gart_table_vram_free(rdev);
	} else {
		r100_pci_gart_disable(rdev);
		radeon_gart_table_ram_free(rdev);
	}
	radeon_gart_fini(rdev);
}
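
/*
 * Fence emission, as read from the code below: the SC/US blocks are
 * forced idle, the writes to 0x4E4C and 0x4F18 look like destination
 * and Z cache flushes, the CP then waits for 2D/3D idle & clean, and
 * only after that is the fence sequence number written to the driver
 * scratch register and a software interrupt fired so waiters can be
 * woken.  (The magic register offsets are undocumented here; the names
 * are inferred.)
 */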
void r300_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	/* Whoever calls radeon_fence_emit should call ring_lock and ask
	 * for enough space (today the callers are ib schedule and buffer
	 * move) */
	/* Write SC register so SC & US assert idle */
	radeon_ring_write(rdev, PACKET0(0x43E0, 0));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, PACKET0(0x43E4, 0));
	radeon_ring_write(rdev, 0);
	/* Flush 3D cache */
	radeon_ring_write(rdev, PACKET0(0x4E4C, 0));
	radeon_ring_write(rdev, (2 << 0));
	radeon_ring_write(rdev, PACKET0(0x4F18, 0));
	radeon_ring_write(rdev, (1 << 0));
	/* Wait until IDLE & CLEAN */
	radeon_ring_write(rdev, PACKET0(0x1720, 0));
	radeon_ring_write(rdev, (1 << 17) | (1 << 16) | (1 << 9));
	/* Emit fence sequence & fire IRQ */
	radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
	radeon_ring_write(rdev, fence->seq);
	radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
	radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
}

/*
 * Global GPU functions
 */
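
/*
 * r300_copy_dma() below splits the copy into chunks because a single
 * DMA packet encodes at most 0x1FFFFF bytes; the ring is locked for
 * num_loops * 4 dwords of copy packets plus a 64-dword margin for the
 * surrounding wait and fence packets.
 */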
int r300_copy_dma(struct radeon_device *rdev,
		  uint64_t src_offset,
		  uint64_t dst_offset,
		  unsigned num_pages,
		  struct radeon_fence *fence)
{
	uint32_t size;
	uint32_t cur_size;
	int i, num_loops;
	int r = 0;

	/* radeon pitch is /64 */
	size = num_pages << PAGE_SHIFT;
	num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
	r = radeon_ring_lock(rdev, num_loops * 4 + 64);
	if (r) {
		DRM_ERROR("radeon: moving bo (%d).\n", r);
		return r;
	}
	/* Must wait for 2D idle & clean before DMA or hangs might happen */
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev, (1 << 16));
	for (i = 0; i < num_loops; i++) {
		cur_size = size;
		if (cur_size > 0x1FFFFF) {
			cur_size = 0x1FFFFF;
		}
		size -= cur_size;
		radeon_ring_write(rdev, PACKET0(0x720, 2));
		radeon_ring_write(rdev, src_offset);
		radeon_ring_write(rdev, dst_offset);
		radeon_ring_write(rdev, cur_size | (1 << 31) | (1 << 30));
		src_offset += cur_size;
		dst_offset += cur_size;
	}
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev, RADEON_WAIT_DMA_GUI_IDLE);
	if (fence) {
		r = radeon_fence_emit(rdev, fence);
	}
	radeon_ring_unlock_commit(rdev);
	return r;
}
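
/*
 * GB_TILE_CONFIG written below encodes the quad-pipe count; going by
 * the R300_PIPE_COUNT_* names, 1, 2, 3 and 4 pipes map to the RV350,
 * R300, R420_3P and R420 layouts respectively.
 */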
void r300_ring_start(struct radeon_device *rdev)
{
	unsigned gb_tile_config;
	int r;

	/* Sub pixel 1/12 so we can have 4K rendering according to doc */
	gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
	switch (rdev->num_gb_pipes) {
	case 2:
		gb_tile_config |= R300_PIPE_COUNT_R300;
		break;
	case 3:
		gb_tile_config |= R300_PIPE_COUNT_R420_3P;
		break;
	case 4:
		gb_tile_config |= R300_PIPE_COUNT_R420;
		break;
	case 1:
	default:
		gb_tile_config |= R300_PIPE_COUNT_RV350;
		break;
	}

	r = radeon_ring_lock(rdev, 64);
	if (r) {
		return;
	}
	radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
	radeon_ring_write(rdev,
			  RADEON_ISYNC_ANY2D_IDLE3D |
			  RADEON_ISYNC_ANY3D_IDLE2D |
			  RADEON_ISYNC_WAIT_IDLEGUI |
			  RADEON_ISYNC_CPSCRATCH_IDLEGUI);
	radeon_ring_write(rdev, PACKET0(R300_GB_TILE_CONFIG, 0));
	radeon_ring_write(rdev, gb_tile_config);
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_3D_IDLECLEAN);
	radeon_ring_write(rdev, PACKET0(0x170C, 0));
	radeon_ring_write(rdev, 1 << 31);
	radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
	radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
	radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
	radeon_ring_write(rdev,
			  RADEON_WAIT_2D_IDLECLEAN |
			  RADEON_WAIT_3D_IDLECLEAN);
	radeon_ring_write(rdev, PACKET0(R300_GB_AA_CONFIG, 0));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
	radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
	radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
	radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS0, 0));
	radeon_ring_write(rdev,
			  ((6 << R300_MS_X0_SHIFT) |
			   (6 << R300_MS_Y0_SHIFT) |
			   (6 << R300_MS_X1_SHIFT) |
			   (6 << R300_MS_Y1_SHIFT) |
			   (6 << R300_MS_X2_SHIFT) |
			   (6 << R300_MS_Y2_SHIFT) |
			   (6 << R300_MSBD0_Y_SHIFT) |
			   (6 << R300_MSBD0_X_SHIFT)));
	radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS1, 0));
	radeon_ring_write(rdev,
			  ((6 << R300_MS_X3_SHIFT) |
			   (6 << R300_MS_Y3_SHIFT) |
			   (6 << R300_MS_X4_SHIFT) |
			   (6 << R300_MS_Y4_SHIFT) |
			   (6 << R300_MS_X5_SHIFT) |
			   (6 << R300_MS_Y5_SHIFT) |
			   (6 << R300_MSBD1_SHIFT)));
	radeon_ring_write(rdev, PACKET0(R300_GA_ENHANCE, 0));
	radeon_ring_write(rdev, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
	radeon_ring_write(rdev, PACKET0(R300_GA_POLY_MODE, 0));
	radeon_ring_write(rdev,
			  R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE);
	radeon_ring_write(rdev, PACKET0(R300_GA_ROUND_MODE, 0));
	radeon_ring_write(rdev,
			  R300_GEOMETRY_ROUND_NEAREST |
			  R300_COLOR_ROUND_NEAREST);
	radeon_ring_unlock_commit(rdev);
}
void r300_errata(struct radeon_device *rdev)
{
	rdev->pll_errata = 0;

	if (rdev->family == CHIP_R300 &&
	    (RREG32(RADEON_CONFIG_CNTL) & RADEON_CFG_ATI_REV_ID_MASK) == RADEON_CFG_ATI_REV_A11) {
		rdev->pll_errata |= CHIP_ERRATA_R300_CG;
	}
}
int r300_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	uint32_t tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		/* read MC_STATUS */
		tmp = RREG32(0x0150);
		if (tmp & (1 << 4)) {
			return 0;
		}
		DRM_UDELAY(1);
	}
	return -1;
}
void r300_gpu_init(struct radeon_device *rdev)
{
	uint32_t gb_tile_config, tmp;

	r100_hdp_reset(rdev);
	/* FIXME: rv380: one pipe? */
	if ((rdev->family == CHIP_R300) || (rdev->family == CHIP_R350)) {
		/* r300,r350 */
		rdev->num_gb_pipes = 2;
	} else {
		/* rv350,rv370,rv380 */
		rdev->num_gb_pipes = 1;
	}
	rdev->num_z_pipes = 1;
	gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
	switch (rdev->num_gb_pipes) {
	case 2:
		gb_tile_config |= R300_PIPE_COUNT_R300;
		break;
	case 3:
		gb_tile_config |= R300_PIPE_COUNT_R420_3P;
		break;
	case 4:
		gb_tile_config |= R300_PIPE_COUNT_R420;
		break;
	default:
	case 1:
		gb_tile_config |= R300_PIPE_COUNT_RV350;
		break;
	}
	WREG32(R300_GB_TILE_CONFIG, gb_tile_config);

	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait for GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}

	tmp = RREG32(0x170C);
	WREG32(0x170C, tmp | (1 << 31));

	WREG32(R300_RB2D_DSTCACHE_MODE,
	       R300_DC_AUTOFLUSH_ENABLE |
	       R300_DC_DC_DISABLE_IGNORE_PE);

	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait for GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}
	if (r300_mc_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait for MC idle while "
		       "programming pipes. Bad things might happen.\n");
	}
	DRM_INFO("radeon: %d quad pipes, %d Z pipes initialized.\n",
		 rdev->num_gb_pipes, rdev->num_z_pipes);
}
int r300_ga_reset(struct radeon_device *rdev)
{
	uint32_t tmp;
	bool reinit_cp;
	int i;

	reinit_cp = rdev->cp.ready;
	rdev->cp.ready = false;
	for (i = 0; i < rdev->usec_timeout; i++) {
		WREG32(RADEON_CP_CSQ_MODE, 0);
		WREG32(RADEON_CP_CSQ_CNTL, 0);
		WREG32(RADEON_RBBM_SOFT_RESET, 0x32005);
		(void)RREG32(RADEON_RBBM_SOFT_RESET);
		udelay(200);
		WREG32(RADEON_RBBM_SOFT_RESET, 0);
		/* Wait to prevent race in RBBM_STATUS */
		mdelay(1);
		tmp = RREG32(RADEON_RBBM_STATUS);
		if (tmp & ((1 << 20) | (1 << 26))) {
			DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)", tmp);
			/* GA still busy, soft reset it */
			WREG32(0x429C, 0x200);
			WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0);
			WREG32(0x43E0, 0);
			WREG32(0x43E4, 0);
			WREG32(0x24AC, 0);
		}
		/* Wait to prevent race in RBBM_STATUS */
		mdelay(1);
		tmp = RREG32(RADEON_RBBM_STATUS);
		if (!(tmp & ((1 << 20) | (1 << 26)))) {
			break;
		}
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(RADEON_RBBM_STATUS);
		if (!(tmp & ((1 << 20) | (1 << 26)))) {
			DRM_INFO("GA reset succeeded (RBBM_STATUS=0x%08X)\n",
				 tmp);
			if (reinit_cp) {
				return r100_cp_init(rdev, rdev->cp.ring_size);
			}
			return 0;
		}
		DRM_UDELAY(1);
	}
	tmp = RREG32(RADEON_RBBM_STATUS);
	DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp);
	return -1;
}
int r300_gpu_reset(struct radeon_device *rdev)
{
	uint32_t status;

	/* reset order likely matters */
	status = RREG32(RADEON_RBBM_STATUS);
	/* reset HDP */
	r100_hdp_reset(rdev);
	/* reset rb2d */
	if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
		r100_rb2d_reset(rdev);
	}
	/* reset GA */
	if (status & ((1 << 20) | (1 << 26))) {
		r300_ga_reset(rdev);
	}
	/* reset CP */
	status = RREG32(RADEON_RBBM_STATUS);
	if (status & (1 << 16)) {
		r100_cp_reset(rdev);
	}
	/* Check if GPU is idle */
	status = RREG32(RADEON_RBBM_STATUS);
	if (status & (1 << 31)) {
		DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
		return -1;
	}
	DRM_INFO("GPU reset succeeded (RBBM_STATUS=0x%08X)\n", status);
	return 0;
}

/*
 * r300,r350,rv350,rv380 VRAM info
 */
void r300_vram_info(struct radeon_device *rdev)
{
	uint32_t tmp;

	/* DDR for all cards after R300 & IGP */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(RADEON_MEM_CNTL);
	if (tmp & R300_MEM_NUM_CHANNELS_MASK) {
		rdev->mc.vram_width = 128;
	} else {
		rdev->mc.vram_width = 64;
	}

	r100_vram_init_sizes(rdev);
}
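
/*
 * Lane reconfiguration protocol, as inferred from the code below: the
 * desired width is written first, then the register is written again
 * with RECONFIG_NOW set to trigger the retrain.  While the link
 * retrains, PCIE register reads can come back as 0xffffffff, which is
 * what the completion loop at the end polls for.
 */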
void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
{
	uint32_t link_width_cntl, mask;

	if (rdev->flags & RADEON_IS_IGP)
		return;

	if (!(rdev->flags & RADEON_IS_PCIE))
		return;

	/* FIXME wait for idle */

	switch (lanes) {
	case 0:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
		break;
	case 1:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
		break;
	case 2:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
		break;
	case 4:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
		break;
	case 8:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
		break;
	case 12:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
		break;
	case 16:
	default:
		mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
		break;
	}

	link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

	if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
	    (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
		return;

	link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
			     RADEON_PCIE_LC_RECONFIG_NOW |
			     RADEON_PCIE_LC_RECONFIG_LATER |
			     RADEON_PCIE_LC_SHORT_RECONFIG_EN);
	link_width_cntl |= mask;
	WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
	WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
						     RADEON_PCIE_LC_RECONFIG_NOW));

	/* wait for lane set to complete */
	link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
	while (link_width_cntl == 0xffffffff)
		link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
}
#if defined(CONFIG_DEBUG_FS)
static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t tmp;

	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
	seq_printf(m, "PCIE_TX_GART_CNTL 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_BASE);
	seq_printf(m, "PCIE_TX_GART_BASE 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_LO);
	seq_printf(m, "PCIE_TX_GART_START_LO 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_HI);
	seq_printf(m, "PCIE_TX_GART_START_HI 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_LO);
	seq_printf(m, "PCIE_TX_GART_END_LO 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_HI);
	seq_printf(m, "PCIE_TX_GART_END_HI 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_ERROR);
	seq_printf(m, "PCIE_TX_GART_ERROR 0x%08x\n", tmp);
	return 0;
}
static struct drm_info_list rv370_pcie_gart_info_list[] = {
	{"rv370_pcie_gart_info", rv370_debugfs_pcie_gart_info, 0, NULL},
};
#endif
int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, rv370_pcie_gart_info_list, 1);
#else
	return 0;
#endif
}
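
/*
 * CS checker.  Userspace supplies GPU addresses as object handles plus
 * relocations; for each register carrying an address, the checker
 * fetches the relocation and patches the indirect buffer in place:
 * ib[idx] = ib_chunk->kdata[idx] + (u32)reloc->lobj.gpu_offset.
 * Registers with side effects are validated case by case below, while
 * everything else is filtered through the r300_reg_safe_bm bitmap (see
 * r300_cs_parse() and r300_init() at the end of this file).
 */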
static int r300_packet0_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt,
			      unsigned idx, unsigned reg)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_cs_reloc *reloc;
	struct r100_cs_track *track;
	volatile uint32_t *ib;
	uint32_t tmp, tile_flags = 0;
	unsigned i;
	int r;

	ib = p->ib->ptr;
	ib_chunk = &p->chunks[p->chunk_ib_idx];
	track = (struct r100_cs_track *)p->track;
	switch (reg) {
	case AVIVO_D1MODE_VLINE_START_END:
	case RADEON_CRTC_GUI_TRIG_VLINE:
		r = r100_cs_packet_parse_vline(p);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		break;
	case RADEON_DST_PITCH_OFFSET:
	case RADEON_SRC_PITCH_OFFSET:
		r = r100_reloc_pitch_offset(p, pkt, idx, reg);
		if (r)
			return r;
		break;
	case R300_RB3D_COLOROFFSET0:
	case R300_RB3D_COLOROFFSET1:
	case R300_RB3D_COLOROFFSET2:
	case R300_RB3D_COLOROFFSET3:
		i = (reg - R300_RB3D_COLOROFFSET0) >> 2;
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->cb[i].robj = reloc->robj;
		track->cb[i].offset = ib_chunk->kdata[idx];
		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
		break;
	case R300_ZB_DEPTHOFFSET:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		track->zb.robj = reloc->robj;
		track->zb.offset = ib_chunk->kdata[idx];
		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
		break;
	case R300_TX_OFFSET_0:
	case R300_TX_OFFSET_0+4:
	case R300_TX_OFFSET_0+8:
	case R300_TX_OFFSET_0+12:
	case R300_TX_OFFSET_0+16:
	case R300_TX_OFFSET_0+20:
	case R300_TX_OFFSET_0+24:
	case R300_TX_OFFSET_0+28:
	case R300_TX_OFFSET_0+32:
	case R300_TX_OFFSET_0+36:
	case R300_TX_OFFSET_0+40:
	case R300_TX_OFFSET_0+44:
	case R300_TX_OFFSET_0+48:
	case R300_TX_OFFSET_0+52:
	case R300_TX_OFFSET_0+56:
	case R300_TX_OFFSET_0+60:
		i = (reg - R300_TX_OFFSET_0) >> 2;
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
		track->textures[i].robj = reloc->robj;
		break;
	/* Tracked registers */
	case 0x2084:
		/* VAP_VF_CNTL */
		track->vap_vf_cntl = ib_chunk->kdata[idx];
		break;
	case 0x20B4:
		/* VAP_VTX_SIZE */
		track->vtx_size = ib_chunk->kdata[idx] & 0x7F;
		break;
	case 0x2134:
		/* VAP_VF_MAX_VTX_INDX */
		track->max_indx = ib_chunk->kdata[idx] & 0x00FFFFFFUL;
		break;
	case 0x43E4:
		/* SC_SCISSOR1 */
		track->maxy = ((ib_chunk->kdata[idx] >> 13) & 0x1FFF) + 1;
		if (p->rdev->family < CHIP_RV515) {
			track->maxy -= 1440;
		}
		break;
	case 0x4E00:
		/* RB3D_CCTL */
		track->num_cb = ((ib_chunk->kdata[idx] >> 5) & 0x3) + 1;
		break;
	case 0x4E38:
	case 0x4E3C:
	case 0x4E40:
	case 0x4E44:
		/* RB3D_COLORPITCH0 */
		/* RB3D_COLORPITCH1 */
		/* RB3D_COLORPITCH2 */
		/* RB3D_COLORPITCH3 */
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}

		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
			tile_flags |= R300_COLOR_TILE_ENABLE;
		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
			tile_flags |= R300_COLOR_MICROTILE_ENABLE;

		tmp = ib_chunk->kdata[idx] & ~(0x7 << 16);
		tmp |= tile_flags;
		ib[idx] = tmp;

		i = (reg - 0x4E38) >> 2;
		track->cb[i].pitch = ib_chunk->kdata[idx] & 0x3FFE;
		switch (((ib_chunk->kdata[idx] >> 21) & 0xF)) {
		case 9:
		case 11:
		case 12:
			track->cb[i].cpp = 1;
			break;
		case 3:
		case 4:
		case 13:
		case 15:
			track->cb[i].cpp = 2;
			break;
		case 6:
			track->cb[i].cpp = 4;
			break;
		case 10:
			track->cb[i].cpp = 8;
			break;
		case 7:
			track->cb[i].cpp = 16;
			break;
		default:
			DRM_ERROR("Invalid color buffer format (%d) !\n",
				  ((ib_chunk->kdata[idx] >> 21) & 0xF));
			return -EINVAL;
		}
		break;
	case 0x4F00:
		/* ZB_CNTL */
		if (ib_chunk->kdata[idx] & 2) {
			track->z_enabled = true;
		} else {
			track->z_enabled = false;
		}
		break;
	case 0x4F10:
		/* ZB_FORMAT */
		switch ((ib_chunk->kdata[idx] & 0xF)) {
		case 0:
		case 1:
			track->zb.cpp = 2;
			break;
		case 2:
			track->zb.cpp = 4;
			break;
		default:
			DRM_ERROR("Invalid z buffer format (%d) !\n",
				  (ib_chunk->kdata[idx] & 0xF));
			return -EINVAL;
		}
		break;
	case 0x4F24:
		/* ZB_DEPTHPITCH */
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}

		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
			tile_flags |= R300_DEPTHMACROTILE_ENABLE;
		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
			tile_flags |= R300_DEPTHMICROTILE_TILED;

		tmp = ib_chunk->kdata[idx] & ~(0x7 << 16);
		tmp |= tile_flags;
		ib[idx] = tmp;

		track->zb.pitch = ib_chunk->kdata[idx] & 0x3FFC;
		break;
	case 0x4104:
		for (i = 0; i < 16; i++) {
			bool enabled;

			enabled = !!(ib_chunk->kdata[idx] & (1 << i));
			track->textures[i].enabled = enabled;
		}
		break;
	case 0x44C0:
	case 0x44C4:
	case 0x44C8:
	case 0x44CC:
	case 0x44D0:
	case 0x44D4:
	case 0x44D8:
	case 0x44DC:
	case 0x44E0:
	case 0x44E4:
	case 0x44E8:
	case 0x44EC:
	case 0x44F0:
	case 0x44F4:
	case 0x44F8:
	case 0x44FC:
		/* TX_FORMAT1_[0-15] */
		i = (reg - 0x44C0) >> 2;
		tmp = (ib_chunk->kdata[idx] >> 25) & 0x3;
		track->textures[i].tex_coord_type = tmp;
		switch ((ib_chunk->kdata[idx] & 0x1F)) {
		case R300_TX_FORMAT_X8:
		case R300_TX_FORMAT_Y4X4:
		case R300_TX_FORMAT_Z3Y3X2:
			track->textures[i].cpp = 1;
			break;
		case R300_TX_FORMAT_X16:
		case R300_TX_FORMAT_Y8X8:
		case R300_TX_FORMAT_Z5Y6X5:
		case R300_TX_FORMAT_Z6Y5X5:
		case R300_TX_FORMAT_W4Z4Y4X4:
		case R300_TX_FORMAT_W1Z5Y5X5:
		case R300_TX_FORMAT_DXT1:
		case R300_TX_FORMAT_D3DMFT_CxV8U8:
		case R300_TX_FORMAT_B8G8_B8G8:
		case R300_TX_FORMAT_G8R8_G8B8:
			track->textures[i].cpp = 2;
			break;
		case R300_TX_FORMAT_Y16X16:
		case R300_TX_FORMAT_Z11Y11X10:
		case R300_TX_FORMAT_Z10Y11X11:
		case R300_TX_FORMAT_W8Z8Y8X8:
		case R300_TX_FORMAT_W2Z10Y10X10:
		case 0x17:
		case R300_TX_FORMAT_FL_I32:
		case 0x1e:
		case R300_TX_FORMAT_DXT3:
		case R300_TX_FORMAT_DXT5:
			track->textures[i].cpp = 4;
			break;
		case R300_TX_FORMAT_W16Z16Y16X16:
		case R300_TX_FORMAT_FL_R16G16B16A16:
		case R300_TX_FORMAT_FL_I32A32:
			track->textures[i].cpp = 8;
			break;
		case R300_TX_FORMAT_FL_R32G32B32A32:
			track->textures[i].cpp = 16;
			break;
		default:
			DRM_ERROR("Invalid texture format %u\n",
				  (ib_chunk->kdata[idx] & 0x1F));
			return -EINVAL;
		}
		break;
	case 0x4400:
	case 0x4404:
	case 0x4408:
	case 0x440C:
	case 0x4410:
	case 0x4414:
	case 0x4418:
	case 0x441C:
	case 0x4420:
	case 0x4424:
	case 0x4428:
	case 0x442C:
	case 0x4430:
	case 0x4434:
	case 0x4438:
	case 0x443C:
		/* TX_FILTER0_[0-15] */
		i = (reg - 0x4400) >> 2;
		tmp = ib_chunk->kdata[idx] & 0x7;
		if (tmp == 2 || tmp == 4 || tmp == 6) {
			track->textures[i].roundup_w = false;
		}
		tmp = (ib_chunk->kdata[idx] >> 3) & 0x7;
		if (tmp == 2 || tmp == 4 || tmp == 6) {
			track->textures[i].roundup_h = false;
		}
		break;
	case 0x4500:
	case 0x4504:
	case 0x4508:
	case 0x450C:
	case 0x4510:
	case 0x4514:
	case 0x4518:
	case 0x451C:
	case 0x4520:
	case 0x4524:
	case 0x4528:
	case 0x452C:
	case 0x4530:
	case 0x4534:
	case 0x4538:
	case 0x453C:
		/* TX_FORMAT2_[0-15] */
		i = (reg - 0x4500) >> 2;
		tmp = ib_chunk->kdata[idx] & 0x3FFF;
		track->textures[i].pitch = tmp + 1;
		if (p->rdev->family >= CHIP_RV515) {
			tmp = ((ib_chunk->kdata[idx] >> 15) & 1) << 11;
			track->textures[i].width_11 = tmp;
			tmp = ((ib_chunk->kdata[idx] >> 16) & 1) << 11;
			track->textures[i].height_11 = tmp;
		}
		break;
	case 0x4480:
	case 0x4484:
	case 0x4488:
	case 0x448C:
	case 0x4490:
	case 0x4494:
	case 0x4498:
	case 0x449C:
	case 0x44A0:
	case 0x44A4:
	case 0x44A8:
	case 0x44AC:
	case 0x44B0:
	case 0x44B4:
	case 0x44B8:
	case 0x44BC:
		/* TX_FORMAT0_[0-15] */
		i = (reg - 0x4480) >> 2;
		tmp = ib_chunk->kdata[idx] & 0x7FF;
		track->textures[i].width = tmp + 1;
		tmp = (ib_chunk->kdata[idx] >> 11) & 0x7FF;
		track->textures[i].height = tmp + 1;
		tmp = (ib_chunk->kdata[idx] >> 26) & 0xF;
		track->textures[i].num_levels = tmp;
		tmp = ib_chunk->kdata[idx] & (1 << 31);
		track->textures[i].use_pitch = !!tmp;
		tmp = (ib_chunk->kdata[idx] >> 22) & 0xF;
		track->textures[i].txdepth = tmp;
		break;
	case R300_ZB_ZPASS_ADDR:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
		break;
	case 0x4be8:
		/* valid register only on RV530 */
		if (p->rdev->family == CHIP_RV530)
			break;
		/* fall through, do not move */
	default:
		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
		       reg, idx);
		return -EINVAL;
	}
	return 0;
}
static int r300_packet3_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt)
{
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_cs_reloc *reloc;
	struct r100_cs_track *track;
	volatile uint32_t *ib;
	unsigned idx;
	unsigned i, c;
	int r;

	ib = p->ib->ptr;
	ib_chunk = &p->chunks[p->chunk_ib_idx];
	idx = pkt->idx + 1;
	track = (struct r100_cs_track *)p->track;
	switch (pkt->opcode) {
	case PACKET3_3D_LOAD_VBPNTR:
		c = ib_chunk->kdata[idx++] & 0x1F;
		track->num_arrays = c;
		for (i = 0; i < (c - 1); i += 2, idx += 3) {
			r = r100_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("No reloc for packet3 %d\n",
					  pkt->opcode);
				r100_cs_dump_packet(p, pkt);
				return r;
			}
			ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
			track->arrays[i + 0].robj = reloc->robj;
			track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
			track->arrays[i + 0].esize &= 0x7F;
			r = r100_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("No reloc for packet3 %d\n",
					  pkt->opcode);
				r100_cs_dump_packet(p, pkt);
				return r;
			}
			ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset);
			track->arrays[i + 1].robj = reloc->robj;
			track->arrays[i + 1].esize = ib_chunk->kdata[idx] >> 24;
			track->arrays[i + 1].esize &= 0x7F;
		}
		if (c & 1) {
			r = r100_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("No reloc for packet3 %d\n",
					  pkt->opcode);
				r100_cs_dump_packet(p, pkt);
				return r;
			}
			ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
			track->arrays[i + 0].robj = reloc->robj;
			track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
			track->arrays[i + 0].esize &= 0x7F;
		}
		break;
	case PACKET3_INDX_BUFFER:
		r = r100_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
			r100_cs_dump_packet(p, pkt);
			return r;
		}
		ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
		r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_IMMD:
		/* Number of dwords is vtx_size * (num_vertices - 1).
		 * PRIM_WALK must be equal to 3: vertex data is embedded
		 * in the command stream */
		if (((ib_chunk->kdata[idx+1] >> 4) & 0x3) != 3) {
			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
			return -EINVAL;
		}
		track->vap_vf_cntl = ib_chunk->kdata[idx+1];
		track->immd_dwords = pkt->count - 1;
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_IMMD_2:
		/* Number of dwords is vtx_size * (num_vertices - 1).
		 * PRIM_WALK must be equal to 3: vertex data is embedded
		 * in the command stream */
		if (((ib_chunk->kdata[idx] >> 4) & 0x3) != 3) {
			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
			return -EINVAL;
		}
		track->vap_vf_cntl = ib_chunk->kdata[idx];
		track->immd_dwords = pkt->count;
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_VBUF:
		track->vap_vf_cntl = ib_chunk->kdata[idx + 1];
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_VBUF_2:
		track->vap_vf_cntl = ib_chunk->kdata[idx];
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_INDX:
		track->vap_vf_cntl = ib_chunk->kdata[idx + 1];
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_3D_DRAW_INDX_2:
		track->vap_vf_cntl = ib_chunk->kdata[idx];
		r = r100_cs_track_check(p->rdev, track);
		if (r) {
			return r;
		}
		break;
	case PACKET3_NOP:
		break;
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}
int r300_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	struct r100_cs_track track;
	int r;

	r100_cs_track_clear(p->rdev, &track);
	p->track = &track;
	do {
		r = r100_cs_packet_parse(p, &pkt, p->idx);
		if (r) {
			return r;
		}
		p->idx += pkt.count + 2;
		switch (pkt.type) {
		case PACKET_TYPE0:
			r = r100_cs_parse_packet0(p, &pkt,
						  p->rdev->config.r300.reg_safe_bm,
						  p->rdev->config.r300.reg_safe_bm_size,
						  &r300_packet0_check);
			break;
		case PACKET_TYPE2:
			break;
		case PACKET_TYPE3:
			r = r300_packet3_check(p, &pkt);
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
			return -EINVAL;
		}
		if (r) {
			return r;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
	return 0;
}
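
/*
 * r300_reg_safe_bm (generated into r300_reg_safe.h) is presumably a
 * bitmap with one bit per register dword; r100_cs_parse_packet0() uses
 * it to decide which registers can pass straight through and which must
 * be routed to the r300_packet0_check() callback, so harmless state
 * registers need no per-register code here.
 */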
int r300_init(struct radeon_device *rdev)
{
	rdev->config.r300.reg_safe_bm = r300_reg_safe_bm;
	rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r300_reg_safe_bm);
	return 0;
}