/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/seq_file.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_drm.h"
#include "r100_track.h"
#include "r300_reg_safe.h"
/* This file gathers functions specific to: r300,r350,rv350,rv370,rv380
 *
 * - HOST_PATH_CNTL: the r300 family seems to dislike writes to HOST_PATH_CNTL
 *   using MMIO to flush the host path read cache; this leads to a HARDLOCKUP.
 *   However, scheduling such a write on the ring seems harmless; I suspect
 *   the CP read collides with the flush somehow, or maybe the MC, hard to
 *   tell. (Jerome Glisse)
 */
/*
 * rv370,rv380 PCIE GART
 */
static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev);
void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
        uint32_t tmp;
        int i;

        /* Workaround HW bug: do the flush 2 times */
        for (i = 0; i < 2; i++) {
                tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
                WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL,
                            tmp | RADEON_PCIE_TX_GART_INVALIDATE_TLB);
                (void)RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
                WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
        }
}
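/*
 * Write one GART page-table entry. Each entry is a 32-bit word: the 40-bit
 * bus address is packed by shifting the low dword right by 8 (entries are
 * page aligned) with address bits 32-39 placed in the top byte, plus the
 * entry valid bits in the low nibble. Entries are 4 bytes each, hence the
 * i * 4 offset in the writel() below.
 */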
int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
        void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;

        if (i < 0 || i > rdev->gart.num_gpu_pages) {
                return -EINVAL;
        }
        addr = (lower_32_bits(addr) >> 8) |
               ((upper_32_bits(addr) & 0xff) << 24) |
               0xc;
        /* on x86 we want this to be CPU endian; on powerpc
         * without HW swappers, it'll get swapped on its way
         * into VRAM - so no need for cpu_to_le32 on VRAM tables */
        writel(addr, ((void __iomem *)ptr) + (i * 4));
        return 0;
}
int rv370_pcie_gart_init(struct radeon_device *rdev)
{
        int r;

        if (rdev->gart.table.vram.robj) {
                WARN(1, "RV370 PCIE GART already initialized.\n");
                return 0;
        }
        /* Initialize common gart structure */
        r = radeon_gart_init(rdev);
        if (r)
                return r;
        r = rv370_debugfs_pcie_gart_info_init(rdev);
        if (r)
                DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
        rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
        rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
        rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
        return radeon_gart_table_vram_alloc(rdev);
}
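/*
 * rv370_pcie_gart_enable() points the PCIE TX GART block at the table in
 * VRAM, bounds it to the GTT aperture, tells it to discard accesses that
 * fall outside the mapped range, then sets the enable bit and flushes the
 * TLB so the new mappings take effect.
 */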
int rv370_pcie_gart_enable(struct radeon_device *rdev)
{
        uint32_t table_addr;
        uint32_t tmp;
        int r;

        if (rdev->gart.table.vram.robj == NULL) {
                dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
                return -EINVAL;
        }
        r = radeon_gart_table_vram_pin(rdev);
        if (r)
                return r;
        radeon_gart_restore(rdev);
        /* discard memory requests outside of the configured range */
        tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
        WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
        WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_start);
        tmp = rdev->mc.gtt_end & ~RADEON_GPU_PAGE_MASK;
        WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
        WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
        WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
        table_addr = rdev->gart.table_addr;
        WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr);
        /* FIXME: setup default page */
        WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_start);
        WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
        WREG32_PCIE(0x18, 0);
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
        tmp |= RADEON_PCIE_TX_GART_EN;
        tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
        WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
        rv370_pcie_gart_tlb_flush(rdev);
        DRM_INFO("PCIE GART of %uM enabled (table at 0x%08X).\n",
                 (unsigned)(rdev->mc.gtt_size >> 20), table_addr);
        rdev->gart.ready = true;
        return 0;
}
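/*
 * Disabling keeps the "discard unmapped access" behaviour but clears the
 * GART enable bit, then unmaps and unpins the table object if one was
 * allocated.
 */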
void rv370_pcie_gart_disable(struct radeon_device *rdev)
{
        uint32_t tmp;
        int r;

        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
        tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
        WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
        if (rdev->gart.table.vram.robj) {
                r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
                if (likely(r == 0)) {
                        radeon_bo_kunmap(rdev->gart.table.vram.robj);
                        radeon_bo_unpin(rdev->gart.table.vram.robj);
                        radeon_bo_unreserve(rdev->gart.table.vram.robj);
                }
        }
}
void rv370_pcie_gart_fini(struct radeon_device *rdev)
{
        radeon_gart_fini(rdev);
        rv370_pcie_gart_disable(rdev);
        radeon_gart_table_vram_free(rdev);
}
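/*
 * Fence emission: flush the destination and Z caches, wait for the 2D/3D
 * engines and DMA to go idle and clean, invalidate the HDP read cache
 * through the ring (an MMIO write to HOST_PATH_CNTL can hard-lock the chip,
 * see the note at the top of this file), then write the fence sequence
 * number to the scratch register and raise a software interrupt.
 */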
void r300_fence_ring_emit(struct radeon_device *rdev,
                          struct radeon_fence *fence)
{
        /* Whoever calls radeon_fence_emit should call ring_lock and ask
         * for enough space (today callers are ib schedule and buffer move) */
        /* Write SC register so SC & US assert idle */
        radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_TL, 0));
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_BR, 0));
        radeon_ring_write(rdev, 0);
        /* Flush the destination and Z caches */
        radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
        radeon_ring_write(rdev, R300_RB3D_DC_FLUSH);
        radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
        radeon_ring_write(rdev, R300_ZC_FLUSH);
        /* Wait until IDLE & CLEAN */
        radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
        radeon_ring_write(rdev, (RADEON_WAIT_3D_IDLECLEAN |
                                 RADEON_WAIT_2D_IDLECLEAN |
                                 RADEON_WAIT_DMA_GUI_IDLE));
        radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
        radeon_ring_write(rdev, rdev->config.r300.hdp_cntl |
                                RADEON_HDP_READ_BUFFER_INVALIDATE);
        radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
        radeon_ring_write(rdev, rdev->config.r300.hdp_cntl);
        /* Emit fence sequence & fire IRQ */
        radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
        radeon_ring_write(rdev, fence->seq);
        radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
        radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
}
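/*
 * r300_ring_start() submits the minimal 3D-engine setup through the CP:
 * tile/pipe configuration, cache flush/free around the pipe programming,
 * multisample positions and the GA round/poly modes, all bracketed by
 * WAIT_UNTIL idles so the engine is clean before and after.
 */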
void r300_ring_start(struct radeon_device *rdev)
{
        unsigned gb_tile_config;
        int r;

        /* Sub pixel 1/12 so we can have 4K rendering according to doc */
        gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
        switch (rdev->num_gb_pipes) {
        case 2:
                gb_tile_config |= R300_PIPE_COUNT_R300;
                break;
        case 3:
                gb_tile_config |= R300_PIPE_COUNT_R420_3P;
                break;
        case 4:
                gb_tile_config |= R300_PIPE_COUNT_R420;
                break;
        case 1:
        default:
                gb_tile_config |= R300_PIPE_COUNT_RV350;
                break;
        }

        r = radeon_ring_lock(rdev, 64);
        if (r)
                return;
        radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
        radeon_ring_write(rdev,
                          RADEON_ISYNC_ANY2D_IDLE3D |
                          RADEON_ISYNC_ANY3D_IDLE2D |
                          RADEON_ISYNC_WAIT_IDLEGUI |
                          RADEON_ISYNC_CPSCRATCH_IDLEGUI);
        radeon_ring_write(rdev, PACKET0(R300_GB_TILE_CONFIG, 0));
        radeon_ring_write(rdev, gb_tile_config);
        radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
        radeon_ring_write(rdev,
                          RADEON_WAIT_2D_IDLECLEAN |
                          RADEON_WAIT_3D_IDLECLEAN);
        radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0));
        radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG);
        radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0));
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0));
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
        radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
        radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
        radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
        radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
        radeon_ring_write(rdev,
                          RADEON_WAIT_2D_IDLECLEAN |
                          RADEON_WAIT_3D_IDLECLEAN);
        radeon_ring_write(rdev, PACKET0(R300_GB_AA_CONFIG, 0));
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
        radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
        radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
        radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
        radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS0, 0));
        radeon_ring_write(rdev,
                          ((6 << R300_MS_X0_SHIFT) |
                           (6 << R300_MS_Y0_SHIFT) |
                           (6 << R300_MS_X1_SHIFT) |
                           (6 << R300_MS_Y1_SHIFT) |
                           (6 << R300_MS_X2_SHIFT) |
                           (6 << R300_MS_Y2_SHIFT) |
                           (6 << R300_MSBD0_Y_SHIFT) |
                           (6 << R300_MSBD0_X_SHIFT)));
        radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS1, 0));
        radeon_ring_write(rdev,
                          ((6 << R300_MS_X3_SHIFT) |
                           (6 << R300_MS_Y3_SHIFT) |
                           (6 << R300_MS_X4_SHIFT) |
                           (6 << R300_MS_Y4_SHIFT) |
                           (6 << R300_MS_X5_SHIFT) |
                           (6 << R300_MS_Y5_SHIFT) |
                           (6 << R300_MSBD1_SHIFT)));
        radeon_ring_write(rdev, PACKET0(R300_GA_ENHANCE, 0));
        radeon_ring_write(rdev, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
        radeon_ring_write(rdev, PACKET0(R300_GA_POLY_MODE, 0));
        radeon_ring_write(rdev,
                          R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE);
        radeon_ring_write(rdev, PACKET0(R300_GA_ROUND_MODE, 0));
        radeon_ring_write(rdev,
                          R300_GEOMETRY_ROUND_NEAREST |
                          R300_COLOR_ROUND_NEAREST);
        radeon_ring_unlock_commit(rdev);
}
void r300_errata(struct radeon_device *rdev)
{
        rdev->pll_errata = 0;

        if (rdev->family == CHIP_R300 &&
            (RREG32(RADEON_CONFIG_CNTL) & RADEON_CFG_ATI_REV_ID_MASK) == RADEON_CFG_ATI_REV_A11) {
                rdev->pll_errata |= CHIP_ERRATA_R300_CG;
        }
}
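/*
 * Poll RADEON_MC_STATUS until the memory controller reports R300_MC_IDLE,
 * bounded by rdev->usec_timeout iterations.
 */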
int r300_mc_wait_for_idle(struct radeon_device *rdev)
{
        unsigned i;
        uint32_t tmp;

        for (i = 0; i < rdev->usec_timeout; i++) {
                /* read MC_STATUS */
                tmp = RREG32(RADEON_MC_STATUS);
                if (tmp & R300_MC_IDLE) {
                        return 0;
                }
                DRM_UDELAY(1);
        }
        return -1;
}
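/*
 * r300_gpu_init() derives the quad-pipe count from the chip family
 * (R300/R350 have two pipes, the RV3xx parts one), programs the tile
 * configuration and lets DST_PIPE_CONFIG auto-configure, waiting for GUI
 * and MC idle around the pipe programming.
 */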
void r300_gpu_init(struct radeon_device *rdev)
{
        uint32_t gb_tile_config, tmp;

        r100_hdp_reset(rdev);
        /* FIXME: does rv380 only have one pipe? */
        if ((rdev->family == CHIP_R300) || (rdev->family == CHIP_R350)) {
                /* r300,r350 */
                rdev->num_gb_pipes = 2;
        } else {
                /* rv350,rv370,rv380 */
                rdev->num_gb_pipes = 1;
        }
        rdev->num_z_pipes = 1;
        gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
        switch (rdev->num_gb_pipes) {
        case 2:
                gb_tile_config |= R300_PIPE_COUNT_R300;
                break;
        case 3:
                gb_tile_config |= R300_PIPE_COUNT_R420_3P;
                break;
        case 4:
                gb_tile_config |= R300_PIPE_COUNT_R420;
                break;
        case 1:
        default:
                gb_tile_config |= R300_PIPE_COUNT_RV350;
                break;
        }
        WREG32(R300_GB_TILE_CONFIG, gb_tile_config);

        if (r100_gui_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait GUI idle while "
                       "programming pipes. Bad things might happen.\n");
        }

        tmp = RREG32(R300_DST_PIPE_CONFIG);
        WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG);

        WREG32(R300_RB2D_DSTCACHE_MODE,
               R300_DC_AUTOFLUSH_ENABLE |
               R300_DC_DC_DISABLE_IGNORE_PE);

        if (r100_gui_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait GUI idle while "
                       "programming pipes. Bad things might happen.\n");
        }
        if (r300_mc_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait MC idle while "
                       "programming pipes. Bad things might happen.\n");
        }
        DRM_INFO("radeon: %d quad pipes, %d Z pipes initialized.\n",
                 rdev->num_gb_pipes, rdev->num_z_pipes);
}
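/*
 * Soft-reset the GA/VAP block. The CP is quiesced first (and re-initialized
 * afterwards if it was running), then RBBM_SOFT_RESET is pulsed and
 * RBBM_STATUS is polled until the busy bits it tests (bits 20 and 26)
 * clear.
 */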
int r300_ga_reset(struct radeon_device *rdev)
{
        uint32_t tmp;
        bool reinit_cp;
        int i;

        reinit_cp = rdev->cp.ready;
        rdev->cp.ready = false;
        for (i = 0; i < rdev->usec_timeout; i++) {
                WREG32(RADEON_CP_CSQ_MODE, 0);
                WREG32(RADEON_CP_CSQ_CNTL, 0);
                WREG32(RADEON_RBBM_SOFT_RESET, 0x32005);
                (void)RREG32(RADEON_RBBM_SOFT_RESET);
                WREG32(RADEON_RBBM_SOFT_RESET, 0);
                /* Wait to prevent race in RBBM_STATUS */
                mdelay(1);
                tmp = RREG32(RADEON_RBBM_STATUS);
                if (tmp & ((1 << 20) | (1 << 26))) {
                        DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)", tmp);
                        /* GA still busy, soft reset it */
                        WREG32(0x429C, 0x200);
                        WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0);
                        WREG32(R300_RE_SCISSORS_TL, 0);
                        WREG32(R300_RE_SCISSORS_BR, 0);
                }
                /* Wait to prevent race in RBBM_STATUS */
                mdelay(1);
                tmp = RREG32(RADEON_RBBM_STATUS);
                if (!(tmp & ((1 << 20) | (1 << 26)))) {
                        break;
                }
        }
        for (i = 0; i < rdev->usec_timeout; i++) {
                tmp = RREG32(RADEON_RBBM_STATUS);
                if (!(tmp & ((1 << 20) | (1 << 26)))) {
                        DRM_INFO("GA reset succeed (RBBM_STATUS=0x%08X)\n",
                                 tmp);
                        if (reinit_cp) {
                                return r100_cp_init(rdev, rdev->cp.ring_size);
                        }
                        return 0;
                }
                DRM_UDELAY(1);
        }
        tmp = RREG32(RADEON_RBBM_STATUS);
        DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp);
        return -1;
}
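/*
 * Full GPU reset: read RBBM_STATUS and reset the blocks that report busy
 * (HDP, then RB2D, GA/VAP and the CP as needed), re-checking status after
 * each step and only reporting success once RBBM_ACTIVE clears.
 */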
int r300_gpu_reset(struct radeon_device *rdev)
{
        uint32_t status;

        /* reset order likely matters */
        status = RREG32(RADEON_RBBM_STATUS);
        /* reset HDP */
        r100_hdp_reset(rdev);
        /* reset rb2d */
        if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
                r100_rb2d_reset(rdev);
        }
        /* reset GA */
        if (status & ((1 << 20) | (1 << 26))) {
                r300_ga_reset(rdev);
        }
        /* reset CP */
        status = RREG32(RADEON_RBBM_STATUS);
        if (status & (1 << 16)) {
                r100_cp_reset(rdev);
        }
        /* Check if GPU is idle */
        status = RREG32(RADEON_RBBM_STATUS);
        if (status & RADEON_RBBM_ACTIVE) {
                DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
                return -1;
        }
        DRM_INFO("GPU reset succeed (RBBM_STATUS=0x%08X)\n", status);
        return 0;
}
/*
 * r300,r350,rv350,rv380 VRAM info
 */
void r300_mc_init(struct radeon_device *rdev)
{
        u64 base;
        u32 tmp;

        /* DDR for all cards after R300 & IGP */
        rdev->mc.vram_is_ddr = true;
        tmp = RREG32(RADEON_MEM_CNTL);
        tmp &= R300_MEM_NUM_CHANNELS_MASK;
        switch (tmp) {
        case 0: rdev->mc.vram_width = 64; break;
        case 1: rdev->mc.vram_width = 128; break;
        case 2: rdev->mc.vram_width = 256; break;
        default: rdev->mc.vram_width = 128; break;
        }
        r100_vram_init_sizes(rdev);
        base = rdev->mc.aper_base;
        if (rdev->flags & RADEON_IS_IGP)
                base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
        radeon_vram_location(rdev, &rdev->mc, base);
        if (!(rdev->flags & RADEON_IS_AGP))
                radeon_gtt_location(rdev, &rdev->mc);
        radeon_update_bandwidth_info(rdev);
}
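/*
 * Reconfigure the PCIE link width. The requested lane count is translated
 * to a RADEON_PCIE_LC_LINK_WIDTH_X* mask; if the link is already at that
 * width nothing is done, otherwise the width field is rewritten and
 * RADEON_PCIE_LC_RECONFIG_NOW triggers the retrain, with the register
 * polled afterwards until it reads back sanely.
 */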
void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
{
        uint32_t link_width_cntl, mask;

        if (rdev->flags & RADEON_IS_IGP)
                return;

        if (!(rdev->flags & RADEON_IS_PCIE))
                return;

        /* FIXME wait for idle */

        switch (lanes) {
        case 0:
                mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
                break;
        case 1:
                mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
                break;
        case 2:
                mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
                break;
        case 4:
                mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
                break;
        case 8:
                mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
                break;
        case 12:
                mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
                break;
        case 16:
        default:
                mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
                break;
        }

        link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

        if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
            (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
                return;

        link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
                             RADEON_PCIE_LC_RECONFIG_NOW |
                             RADEON_PCIE_LC_RECONFIG_LATER |
                             RADEON_PCIE_LC_SHORT_RECONFIG_EN);
        link_width_cntl |= mask;
        WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
        WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
                                                     RADEON_PCIE_LC_RECONFIG_NOW));

        /* wait for lane set to complete */
        link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
        while (link_width_cntl == 0xffffffff)
                link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
}
int rv370_get_pcie_lanes(struct radeon_device *rdev)
{
        u32 link_width_cntl;

        if (rdev->flags & RADEON_IS_IGP)
                return 0;

        if (!(rdev->flags & RADEON_IS_PCIE))
                return 0;

        /* FIXME wait for idle */

        if (rdev->family < CHIP_R600)
                link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
        else
                link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

        switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
        case RADEON_PCIE_LC_LINK_WIDTH_X0:
                return 0;
        case RADEON_PCIE_LC_LINK_WIDTH_X1:
                return 1;
        case RADEON_PCIE_LC_LINK_WIDTH_X2:
                return 2;
        case RADEON_PCIE_LC_LINK_WIDTH_X4:
                return 4;
        case RADEON_PCIE_LC_LINK_WIDTH_X8:
                return 8;
        case RADEON_PCIE_LC_LINK_WIDTH_X16:
        default:
                return 16;
        }
}
#if defined(CONFIG_DEBUG_FS)
static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;
        uint32_t tmp;

        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
        seq_printf(m, "PCIE_TX_GART_CNTL 0x%08x\n", tmp);
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_BASE);
        seq_printf(m, "PCIE_TX_GART_BASE 0x%08x\n", tmp);
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_LO);
        seq_printf(m, "PCIE_TX_GART_START_LO 0x%08x\n", tmp);
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_HI);
        seq_printf(m, "PCIE_TX_GART_START_HI 0x%08x\n", tmp);
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_LO);
        seq_printf(m, "PCIE_TX_GART_END_LO 0x%08x\n", tmp);
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_HI);
        seq_printf(m, "PCIE_TX_GART_END_HI 0x%08x\n", tmp);
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_ERROR);
        seq_printf(m, "PCIE_TX_GART_ERROR 0x%08x\n", tmp);
        return 0;
}

static struct drm_info_list rv370_pcie_gart_info_list[] = {
        {"rv370_pcie_gart_info", rv370_debugfs_pcie_gart_info, 0, NULL},
};
#endif

static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
        return radeon_debugfs_add_files(rdev, rv370_pcie_gart_info_list, 1);
#else
        return 0;
#endif
}
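/*
 * Command stream (CS) checking. r300_packet0_check() validates register
 * writes coming from userspace: registers that carry GPU addresses get a
 * relocation applied (the buffer object's GPU offset is added and the
 * patched value written back into the indirect buffer), and state that
 * matters for buffer-size validation (color/Z buffers, textures, vertex
 * format) is recorded in the r100_cs_track structure for a later check.
 */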
static int r300_packet0_check(struct radeon_cs_parser *p,
                              struct radeon_cs_packet *pkt,
                              unsigned idx, unsigned reg)
{
        struct radeon_cs_reloc *reloc;
        struct r100_cs_track *track;
        volatile uint32_t *ib;
        uint32_t tmp, tile_flags = 0;
        unsigned i;
        int r;
        u32 idx_value;

        ib = p->ib->ptr;
        track = (struct r100_cs_track *)p->track;
        idx_value = radeon_get_ib_value(p, idx);

        switch (reg) {
        case AVIVO_D1MODE_VLINE_START_END:
        case RADEON_CRTC_GUI_TRIG_VLINE:
                r = r100_cs_packet_parse_vline(p);
                if (r) {
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                                  idx, reg);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
                break;
        case RADEON_DST_PITCH_OFFSET:
        case RADEON_SRC_PITCH_OFFSET:
                r = r100_reloc_pitch_offset(p, pkt, idx, reg);
                if (r)
                        return r;
                break;
        case R300_RB3D_COLOROFFSET0:
        case R300_RB3D_COLOROFFSET1:
        case R300_RB3D_COLOROFFSET2:
        case R300_RB3D_COLOROFFSET3:
                i = (reg - R300_RB3D_COLOROFFSET0) >> 2;
                r = r100_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                                  idx, reg);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
                track->cb[i].robj = reloc->robj;
                track->cb[i].offset = idx_value;
                ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                break;
        case R300_ZB_DEPTHOFFSET:
                r = r100_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                                  idx, reg);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
                track->zb.robj = reloc->robj;
                track->zb.offset = idx_value;
                ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                break;
        case R300_TX_OFFSET_0:
        case R300_TX_OFFSET_0+4:
        case R300_TX_OFFSET_0+8:
        case R300_TX_OFFSET_0+12:
        case R300_TX_OFFSET_0+16:
        case R300_TX_OFFSET_0+20:
        case R300_TX_OFFSET_0+24:
        case R300_TX_OFFSET_0+28:
        case R300_TX_OFFSET_0+32:
        case R300_TX_OFFSET_0+36:
        case R300_TX_OFFSET_0+40:
        case R300_TX_OFFSET_0+44:
        case R300_TX_OFFSET_0+48:
        case R300_TX_OFFSET_0+52:
        case R300_TX_OFFSET_0+56:
        case R300_TX_OFFSET_0+60:
                i = (reg - R300_TX_OFFSET_0) >> 2;
                r = r100_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                                  idx, reg);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }

                if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
                        tile_flags |= R300_TXO_MACRO_TILE;
                if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
                        tile_flags |= R300_TXO_MICRO_TILE;
                else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
                        tile_flags |= R300_TXO_MICRO_TILE_SQUARE;

                tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
                tmp |= tile_flags;
                ib[idx] = tmp;
                track->textures[i].robj = reloc->robj;
                break;
        /* Tracked registers */
        case 0x2084:
                /* VAP_VF_CNTL */
                track->vap_vf_cntl = idx_value;
                break;
        case 0x20b4:
                /* VAP_VTX_SIZE */
                track->vtx_size = idx_value & 0x7F;
                break;
        case 0x2134:
                /* VAP_VF_MAX_VTX_INDX */
                track->max_indx = idx_value & 0x00FFFFFFUL;
                break;
        case 0x43E4:
                /* SC_SCISSOR1 */
                track->maxy = ((idx_value >> 13) & 0x1FFF) + 1;
                if (p->rdev->family < CHIP_RV515) {
                        track->maxy -= 1440;
                }
                break;
        case 0x4E00:
                /* RB3D_CCTL */
                track->num_cb = ((idx_value >> 5) & 0x3) + 1;
                break;
        case 0x4E38:
        case 0x4E3C:
        case 0x4E40:
        case 0x4E44:
                /* RB3D_COLORPITCH0 */
                /* RB3D_COLORPITCH1 */
                /* RB3D_COLORPITCH2 */
                /* RB3D_COLORPITCH3 */
                r = r100_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                                  idx, reg);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }

                if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
                        tile_flags |= R300_COLOR_TILE_ENABLE;
                if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
                        tile_flags |= R300_COLOR_MICROTILE_ENABLE;
                else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
                        tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;

                tmp = idx_value & ~(0x7 << 16);
                tmp |= tile_flags;
                ib[idx] = tmp;

                i = (reg - 0x4E38) >> 2;
                track->cb[i].pitch = idx_value & 0x3FFE;
                switch (((idx_value >> 21) & 0xF)) {
                case 9:
                case 11:
                case 12:
                        track->cb[i].cpp = 1;
                        break;
                case 3:
                case 4:
                case 13:
                case 15:
                        track->cb[i].cpp = 2;
                        break;
                case 6:
                        track->cb[i].cpp = 4;
                        break;
                case 10:
                        track->cb[i].cpp = 8;
                        break;
                case 7:
                        track->cb[i].cpp = 16;
                        break;
                default:
                        DRM_ERROR("Invalid color buffer format (%d) !\n",
                                  ((idx_value >> 21) & 0xF));
                        return -EINVAL;
                }
                break;
        case 0x4F00:
                /* ZB_CNTL */
                if (idx_value & R300_Z_ENABLE) {
                        track->z_enabled = true;
                } else {
                        track->z_enabled = false;
                }
                break;
        case 0x4F10:
                /* ZB_FORMAT */
                switch ((idx_value & 0xF)) {
                case 0:
                case 1:
                        track->zb.cpp = 2;
                        break;
                case 2:
                        track->zb.cpp = 4;
                        break;
                default:
                        DRM_ERROR("Invalid z buffer format (%d) !\n",
                                  (idx_value & 0xF));
                        return -EINVAL;
                }
                break;
        case 0x4F24:
                /* ZB_DEPTHPITCH */
                r = r100_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                                  idx, reg);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }

                if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
                        tile_flags |= R300_DEPTHMACROTILE_ENABLE;
                if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
                        tile_flags |= R300_DEPTHMICROTILE_TILED;
                else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
                        tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;

                tmp = idx_value & ~(0x7 << 16);
                tmp |= tile_flags;
                ib[idx] = tmp;

                track->zb.pitch = idx_value & 0x3FFC;
                break;
        case 0x4104:
                /* TX_ENABLE */
                for (i = 0; i < 16; i++) {
                        bool enabled;

                        enabled = !!(idx_value & (1 << i));
                        track->textures[i].enabled = enabled;
                }
                break;
        case 0x44C0:
        case 0x44C4:
        case 0x44C8:
        case 0x44CC:
        case 0x44D0:
        case 0x44D4:
        case 0x44D8:
        case 0x44DC:
        case 0x44E0:
        case 0x44E4:
        case 0x44E8:
        case 0x44EC:
        case 0x44F0:
        case 0x44F4:
        case 0x44F8:
        case 0x44FC:
                /* TX_FORMAT1_[0-15] */
                i = (reg - 0x44C0) >> 2;
                tmp = (idx_value >> 25) & 0x3;
                track->textures[i].tex_coord_type = tmp;
                switch ((idx_value & 0x1F)) {
                case R300_TX_FORMAT_X8:
                case R300_TX_FORMAT_Y4X4:
                case R300_TX_FORMAT_Z3Y3X2:
                        track->textures[i].cpp = 1;
                        break;
                case R300_TX_FORMAT_X16:
                case R300_TX_FORMAT_Y8X8:
                case R300_TX_FORMAT_Z5Y6X5:
                case R300_TX_FORMAT_Z6Y5X5:
                case R300_TX_FORMAT_W4Z4Y4X4:
                case R300_TX_FORMAT_W1Z5Y5X5:
                case R300_TX_FORMAT_D3DMFT_CxV8U8:
                case R300_TX_FORMAT_B8G8_B8G8:
                case R300_TX_FORMAT_G8R8_G8B8:
                        track->textures[i].cpp = 2;
                        break;
                case R300_TX_FORMAT_Y16X16:
                case R300_TX_FORMAT_Z11Y11X10:
                case R300_TX_FORMAT_Z10Y11X11:
                case R300_TX_FORMAT_W8Z8Y8X8:
                case R300_TX_FORMAT_W2Z10Y10X10:
                case R300_TX_FORMAT_FL_I32:
                        track->textures[i].cpp = 4;
                        break;
                case R300_TX_FORMAT_W16Z16Y16X16:
                case R300_TX_FORMAT_FL_R16G16B16A16:
                case R300_TX_FORMAT_FL_I32A32:
                        track->textures[i].cpp = 8;
                        break;
                case R300_TX_FORMAT_FL_R32G32B32A32:
                        track->textures[i].cpp = 16;
                        break;
                case R300_TX_FORMAT_DXT1:
                        track->textures[i].cpp = 1;
                        track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
                        break;
                case R300_TX_FORMAT_ATI2N:
                        if (p->rdev->family < CHIP_R420) {
                                DRM_ERROR("Invalid texture format %u\n",
                                          (idx_value & 0x1F));
                                return -EINVAL;
                        }
                        /* The same rules apply as for DXT3/5. */
                        /* Fall through. */
                case R300_TX_FORMAT_DXT3:
                case R300_TX_FORMAT_DXT5:
                        track->textures[i].cpp = 1;
                        track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
                        break;
                default:
                        DRM_ERROR("Invalid texture format %u\n",
                                  (idx_value & 0x1F));
                        return -EINVAL;
                }
                break;
        case 0x4400:
        case 0x4404:
        case 0x4408:
        case 0x440C:
        case 0x4410:
        case 0x4414:
        case 0x4418:
        case 0x441C:
        case 0x4420:
        case 0x4424:
        case 0x4428:
        case 0x442C:
        case 0x4430:
        case 0x4434:
        case 0x4438:
        case 0x443C:
                /* TX_FILTER0_[0-15] */
                i = (reg - 0x4400) >> 2;
                tmp = idx_value & 0x7;
                if (tmp == 2 || tmp == 4 || tmp == 6) {
                        track->textures[i].roundup_w = false;
                }
                tmp = (idx_value >> 3) & 0x7;
                if (tmp == 2 || tmp == 4 || tmp == 6) {
                        track->textures[i].roundup_h = false;
                }
                break;
        case 0x4500:
        case 0x4504:
        case 0x4508:
        case 0x450C:
        case 0x4510:
        case 0x4514:
        case 0x4518:
        case 0x451C:
        case 0x4520:
        case 0x4524:
        case 0x4528:
        case 0x452C:
        case 0x4530:
        case 0x4534:
        case 0x4538:
        case 0x453C:
                /* TX_FORMAT2_[0-15] */
                i = (reg - 0x4500) >> 2;
                tmp = idx_value & 0x3FFF;
                track->textures[i].pitch = tmp + 1;
                if (p->rdev->family >= CHIP_RV515) {
                        tmp = ((idx_value >> 15) & 1) << 11;
                        track->textures[i].width_11 = tmp;
                        tmp = ((idx_value >> 16) & 1) << 11;
                        track->textures[i].height_11 = tmp;

                        if (idx_value & (1 << 14)) {
                                /* The same rules apply as for DXT1. */
                                track->textures[i].compress_format =
                                        R100_TRACK_COMP_DXT1;
                        }
                } else if (idx_value & (1 << 14)) {
                        DRM_ERROR("Forbidden bit TXFORMAT_MSB\n");
                        return -EINVAL;
                }
                break;
        case 0x4480:
        case 0x4484:
        case 0x4488:
        case 0x448C:
        case 0x4490:
        case 0x4494:
        case 0x4498:
        case 0x449C:
        case 0x44A0:
        case 0x44A4:
        case 0x44A8:
        case 0x44AC:
        case 0x44B0:
        case 0x44B4:
        case 0x44B8:
        case 0x44BC:
                /* TX_FORMAT0_[0-15] */
                i = (reg - 0x4480) >> 2;
                tmp = idx_value & 0x7FF;
                track->textures[i].width = tmp + 1;
                tmp = (idx_value >> 11) & 0x7FF;
                track->textures[i].height = tmp + 1;
                tmp = (idx_value >> 26) & 0xF;
                track->textures[i].num_levels = tmp;
                tmp = idx_value & (1 << 31);
                track->textures[i].use_pitch = !!tmp;
                tmp = (idx_value >> 22) & 0xF;
                track->textures[i].txdepth = tmp;
                break;
        case R300_ZB_ZPASS_ADDR:
                r = r100_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                                  idx, reg);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
                ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                break;
        case 0x4E0C:
                /* RB3D_COLOR_CHANNEL_MASK */
                track->color_channel_mask = idx_value;
                break;
        case 0x4D1C:
                /* ZB_BW_CNTL */
                track->fastfill = !!(idx_value & (1 << 2));
                break;
        case 0x4E04:
                /* RB3D_BLENDCNTL */
                track->blend_read_enable = !!(idx_value & (1 << 2));
                break;
        case 0x4BE8:
                /* valid register only on RV530 */
                if (p->rdev->family == CHIP_RV530)
                        break;
                /* fallthrough, do not move */
        default:
                printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
                       reg, idx);
                return -EINVAL;
        }
        return 0;
}
static int r300_packet3_check(struct radeon_cs_parser *p,
                              struct radeon_cs_packet *pkt)
{
        struct radeon_cs_reloc *reloc;
        struct r100_cs_track *track;
        volatile uint32_t *ib;
        unsigned idx;
        int r;

        ib = p->ib->ptr;
        idx = pkt->idx + 1;
        track = (struct r100_cs_track *)p->track;
        switch (pkt->opcode) {
        case PACKET3_3D_LOAD_VBPNTR:
                r = r100_packet3_load_vbpntr(p, pkt, idx);
                if (r)
                        return r;
                break;
        case PACKET3_INDX_BUFFER:
                r = r100_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
                ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
                r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
                if (r) {
                        return r;
                }
                break;
        case PACKET3_3D_DRAW_IMMD:
                /* Number of dwords is vtx_size * (num_vertices - 1).
                 * PRIM_WALK must be equal to 3: vertex data is embedded
                 * in the command stream */
                if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
                        DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
                        return -EINVAL;
                }
                track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
                track->immd_dwords = pkt->count - 1;
                r = r100_cs_track_check(p->rdev, track);
                if (r) {
                        return r;
                }
                break;
        case PACKET3_3D_DRAW_IMMD_2:
                /* Number of dwords is vtx_size * (num_vertices - 1).
                 * PRIM_WALK must be equal to 3: vertex data is embedded
                 * in the command stream */
                if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
                        DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
                        return -EINVAL;
                }
                track->vap_vf_cntl = radeon_get_ib_value(p, idx);
                track->immd_dwords = pkt->count;
                r = r100_cs_track_check(p->rdev, track);
                if (r) {
                        return r;
                }
                break;
        case PACKET3_3D_DRAW_VBUF:
                track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
                r = r100_cs_track_check(p->rdev, track);
                if (r) {
                        return r;
                }
                break;
        case PACKET3_3D_DRAW_VBUF_2:
                track->vap_vf_cntl = radeon_get_ib_value(p, idx);
                r = r100_cs_track_check(p->rdev, track);
                if (r) {
                        return r;
                }
                break;
        case PACKET3_3D_DRAW_INDX:
                track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
                r = r100_cs_track_check(p->rdev, track);
                if (r) {
                        return r;
                }
                break;
        case PACKET3_3D_DRAW_INDX_2:
                track->vap_vf_cntl = radeon_get_ib_value(p, idx);
                r = r100_cs_track_check(p->rdev, track);
                if (r) {
                        return r;
                }
                break;
        case PACKET3_NOP:
                break;
        default:
                DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
                return -EINVAL;
        }
        return 0;
}
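/*
 * Top-level CS parser for r300: allocate a fresh tracker, then walk the
 * indirect buffer packet by packet, dispatching type-0 packets through the
 * register-safe bitmap / r300_packet0_check() and type-3 packets through
 * r300_packet3_check() until the chunk is consumed.
 */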
int r300_cs_parse(struct radeon_cs_parser *p)
{
        struct radeon_cs_packet pkt;
        struct r100_cs_track *track;
        int r;

        track = kzalloc(sizeof(*track), GFP_KERNEL);
        r100_cs_track_clear(p->rdev, track);
        p->track = track;
        do {
                r = r100_cs_packet_parse(p, &pkt, p->idx);
                if (r) {
                        return r;
                }
                p->idx += pkt.count + 2;
                switch (pkt.type) {
                case PACKET_TYPE0:
                        r = r100_cs_parse_packet0(p, &pkt,
                                                  p->rdev->config.r300.reg_safe_bm,
                                                  p->rdev->config.r300.reg_safe_bm_size,
                                                  &r300_packet0_check);
                        break;
                case PACKET_TYPE2:
                        break;
                case PACKET_TYPE3:
                        r = r300_packet3_check(p, &pkt);
                        break;
                default:
                        DRM_ERROR("Unknown packet type %d !\n", pkt.type);
                        return -EINVAL;
                }
                if (r) {
                        return r;
                }
        } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
        return 0;
}
void r300_set_reg_safe(struct radeon_device *rdev)
{
        rdev->config.r300.reg_safe_bm = r300_reg_safe_bm;
        rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r300_reg_safe_bm);
}
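/*
 * Program the memory controller apertures. All MC clients are stopped
 * first; the AGP aperture is either mapped over the GTT range or
 * effectively disabled when not using AGP, the MC is waited to idle, and
 * finally the framebuffer aperture is pointed at VRAM before the clients
 * resume.
 */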
void r300_mc_program(struct radeon_device *rdev)
{
        struct r100_mc_save save;
        int r;

        r = r100_debugfs_mc_info_init(rdev);
        if (r) {
                dev_err(rdev->dev, "Failed to create r100_mc debugfs file.\n");
        }

        /* Stops all mc clients */
        r100_mc_stop(rdev, &save);
        if (rdev->flags & RADEON_IS_AGP) {
                WREG32(R_00014C_MC_AGP_LOCATION,
                       S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
                       S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
                WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
                WREG32(R_00015C_AGP_BASE_2,
                       upper_32_bits(rdev->mc.agp_base) & 0xff);
        } else {
                WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
                WREG32(R_000170_AGP_BASE, 0);
                WREG32(R_00015C_AGP_BASE_2, 0);
        }
        /* Wait for mc idle */
        if (r300_mc_wait_for_idle(rdev))
                DRM_INFO("Failed to wait MC idle before programming MC.\n");
        /* Program MC, should be a 32-bit limited address space */
        WREG32(R_000148_MC_FB_LOCATION,
               S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
               S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
        r100_mc_resume(rdev, &save);
}
void r300_clock_startup(struct radeon_device *rdev)
{
        u32 tmp;

        if (radeon_dynclks != -1 && radeon_dynclks)
                radeon_legacy_set_clock_gating(rdev, 1);
        /* We need to force on some of the blocks */
        tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
        tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
        if ((rdev->family == CHIP_RV350) || (rdev->family == CHIP_RV380))
                tmp |= S_00000D_FORCE_VAP(1);
        WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
}
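/*
 * r300_startup() performs the runtime bring-up shared by init and resume:
 * common registers, MC programming, clock gating, GPU/pipe init, GART
 * enable (PCIE or PCI as appropriate), bus mastering, then the CP ring,
 * writeback and IB pools.
 */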
static int r300_startup(struct radeon_device *rdev)
{
        int r;

        /* set common regs */
        r100_set_common_regs(rdev);
        /* program mc */
        r300_mc_program(rdev);
        /* Resume clock */
        r300_clock_startup(rdev);
        /* Initialize GPU configuration (# pipes, ...) */
        r300_gpu_init(rdev);
        /* Initialize GART (initialize after TTM so we can allocate
         * memory through TTM but finalize after TTM) */
        if (rdev->flags & RADEON_IS_PCIE) {
                r = rv370_pcie_gart_enable(rdev);
                if (r)
                        return r;
        }

        if (rdev->family == CHIP_R300 ||
            rdev->family == CHIP_R350 ||
            rdev->family == CHIP_RV350)
                r100_enable_bm(rdev);

        if (rdev->flags & RADEON_IS_PCI) {
                r = r100_pci_gart_enable(rdev);
                if (r)
                        return r;
        }
        rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
        /* 1M ring buffer */
        r = r100_cp_init(rdev, 1024 * 1024);
        if (r) {
                dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
                return r;
        }
        r = r100_wb_init(rdev);
        if (r)
                dev_err(rdev->dev, "failed initializing WB (%d).\n", r);
        r = r100_ib_init(rdev);
        if (r) {
                dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
                return r;
        }
        return 0;
}
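/*
 * Resume brings the GART down first, resets and re-posts the card via the
 * combios tables, then runs the shared r300_startup() path.
 */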
int r300_resume(struct radeon_device *rdev)
{
        /* Make sure GART is not working */
        if (rdev->flags & RADEON_IS_PCIE)
                rv370_pcie_gart_disable(rdev);
        if (rdev->flags & RADEON_IS_PCI)
                r100_pci_gart_disable(rdev);
        /* Resume clock before doing reset */
        r300_clock_startup(rdev);
        /* Reset gpu before posting otherwise ATOM will enter infinite loop */
        if (radeon_gpu_reset(rdev)) {
                dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                         RREG32(R_000E40_RBBM_STATUS),
                         RREG32(R_0007C0_CP_STAT));
        }
        /* post the card */
        radeon_combios_asic_init(rdev->ddev);
        /* Resume clock after posting */
        r300_clock_startup(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);
        return r300_startup(rdev);
}
int r300_suspend(struct radeon_device *rdev)
{
        r100_cp_disable(rdev);
        r100_wb_disable(rdev);
        r100_irq_disable(rdev);
        if (rdev->flags & RADEON_IS_PCIE)
                rv370_pcie_gart_disable(rdev);
        if (rdev->flags & RADEON_IS_PCI)
                r100_pci_gart_disable(rdev);
        return 0;
}
void r300_fini(struct radeon_device *rdev)
{
        radeon_pm_fini(rdev);
        radeon_gem_fini(rdev);
        if (rdev->flags & RADEON_IS_PCIE)
                rv370_pcie_gart_fini(rdev);
        if (rdev->flags & RADEON_IS_PCI)
                r100_pci_gart_fini(rdev);
        radeon_agp_fini(rdev);
        radeon_irq_kms_fini(rdev);
        radeon_fence_driver_fini(rdev);
        radeon_bo_fini(rdev);
        radeon_atombios_fini(rdev);
}
int r300_init(struct radeon_device *rdev)
{
        int r;

        /* Disable VGA rendering */
        r100_vga_render_disable(rdev);
        /* Initialize scratch registers */
        radeon_scratch_init(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);
        /* TODO: disable VGA need to use VGA request */
        /* BIOS */
        if (!radeon_get_bios(rdev)) {
                if (ASIC_IS_AVIVO(rdev))
                        return -EINVAL;
        }
        if (rdev->is_atom_bios) {
                dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
                return -EINVAL;
        } else {
                r = radeon_combios_init(rdev);
                if (r)
                        return r;
        }
        /* Reset gpu before posting otherwise ATOM will enter infinite loop */
        if (radeon_gpu_reset(rdev)) {
                dev_warn(rdev->dev,
                         "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                         RREG32(R_000E40_RBBM_STATUS),
                         RREG32(R_0007C0_CP_STAT));
        }
        /* check if cards are posted or not */
        if (radeon_boot_test_post_card(rdev) == false)
                return -EINVAL;
        /* Set asic errata */
        r300_errata(rdev);
        /* Initialize clocks */
        radeon_get_clock_info(rdev->ddev);
        /* Initialize power management */
        radeon_pm_init(rdev);
        /* initialize AGP */
        if (rdev->flags & RADEON_IS_AGP) {
                r = radeon_agp_init(rdev);
                if (r) {
                        radeon_agp_disable(rdev);
                }
        }
        /* initialize memory controller */
        r300_mc_init(rdev);
        /* Fence driver */
        r = radeon_fence_driver_init(rdev);
        if (r)
                return r;
        r = radeon_irq_kms_init(rdev);
        if (r)
                return r;
        /* Memory manager */
        r = radeon_bo_init(rdev);
        if (r)
                return r;
        if (rdev->flags & RADEON_IS_PCIE) {
                r = rv370_pcie_gart_init(rdev);
                if (r)
                        return r;
        }
        if (rdev->flags & RADEON_IS_PCI) {
                r = r100_pci_gart_init(rdev);
                if (r)
                        return r;
        }
        r300_set_reg_safe(rdev);
        rdev->accel_working = true;
        r = r300_startup(rdev);
        if (r) {
                /* Something went wrong with the accel init, stop accel */
                dev_err(rdev->dev, "Disabling GPU acceleration\n");
                radeon_irq_kms_fini(rdev);
                if (rdev->flags & RADEON_IS_PCIE)
                        rv370_pcie_gart_fini(rdev);
                if (rdev->flags & RADEON_IS_PCI)
                        r100_pci_gart_fini(rdev);
                radeon_agp_fini(rdev);
                rdev->accel_working = false;
        }
        return 0;
}