/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <drm/drm_crtc_helper.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_drm.h"
#include "r100_track.h"
#include "r300d.h"
#include "r300_reg_safe.h"
/* This file gathers the functions specific to r300, r350, rv350, rv370 and
 * rv380.
 *
 * GPU errata:
 * - HOST_PATH_CNTL: the r300 family seems to dislike writes to HOST_PATH_CNTL
 *   using MMIO to flush the host path read cache; this leads to a HARDLOCKUP.
 *   However, scheduling such a write on the ring seems harmless; I suspect
 *   the CP read collides with the flush somehow, or maybe with the MC,
 *   hard to tell. (Jerome Glisse)
 */
/*
 * rv370,rv380 PCIE GART
 */
static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev);
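/*
 * Flush the PCIE GART TLB by toggling RADEON_PCIE_TX_GART_INVALIDATE_TLB
 * in TX_GART_CNTL; the read-back in between posts the write.
 */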
void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
        uint32_t tmp;
        int i;

        /* Workaround HW bug: do the flush two times */
        for (i = 0; i < 2; i++) {
                tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
                WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp | RADEON_PCIE_TX_GART_INVALIDATE_TLB);
                (void)RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
                WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
        }
        mb();
}
#define R300_PTE_WRITEABLE (1 << 2)
#define R300_PTE_READABLE  (1 << 3)
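/*
 * Write one GART page table entry: bits 8..39 of the DMA address are packed
 * into a 32-bit PTE (low dword >> 8, top byte in bits 24..31) together with
 * the read/write permission bits above.
 */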
int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
        void __iomem *ptr = rdev->gart.ptr;

        if (i < 0 || i > rdev->gart.num_gpu_pages) {
                return -EINVAL;
        }
        addr = (lower_32_bits(addr) >> 8) |
               ((upper_32_bits(addr) & 0xff) << 24) |
               R300_PTE_WRITEABLE | R300_PTE_READABLE;
        /* On x86 we want this to be CPU endian; on powerpc without HW
         * swappers it'll get swapped on the way into VRAM, so there is no
         * need for cpu_to_le32 on VRAM tables. */
        writel(addr, ((void __iomem *)ptr) + (i * 4));
        return 0;
}
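/*
 * One-time GART setup: initialize the common GART structure, register the
 * debugfs file, and hook up the rv370-specific TLB flush and set_page
 * callbacks before allocating the page table in VRAM.
 */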
int rv370_pcie_gart_init(struct radeon_device *rdev)
{
        int r;

        if (rdev->gart.robj) {
                WARN(1, "RV370 PCIE GART already initialized\n");
                return 0;
        }
        /* Initialize common gart structure */
        r = radeon_gart_init(rdev);
        if (r)
                return r;
        r = rv370_debugfs_pcie_gart_info_init(rdev);
        if (r)
                DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
        rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
        rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
        rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
        return radeon_gart_table_vram_alloc(rdev);
}
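/*
 * Program the GART aperture (gtt_start..gtt_end) and page table base,
 * discard accesses that fall outside the mapped range, then enable the
 * GART and flush the TLB.
 */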
int rv370_pcie_gart_enable(struct radeon_device *rdev)
{
        uint32_t table_addr;
        uint32_t tmp;
        int r;

        if (rdev->gart.robj == NULL) {
                dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
                return -EINVAL;
        }
        r = radeon_gart_table_vram_pin(rdev);
        if (r)
                return r;
        radeon_gart_restore(rdev);
        /* discard memory request outside of configured range */
        tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
        WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
        WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_start);
        tmp = rdev->mc.gtt_end & ~RADEON_GPU_PAGE_MASK;
        WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
        WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
        WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
        table_addr = rdev->gart.table_addr;
        WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr);
        /* FIXME: setup default page */
        WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_start);
        WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
        /* Clear error */
        WREG32_PCIE(RADEON_PCIE_TX_GART_ERROR, 0);
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
        tmp |= RADEON_PCIE_TX_GART_EN;
        tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
        WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
        rv370_pcie_gart_tlb_flush(rdev);
        DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
                 (unsigned)(rdev->mc.gtt_size >> 20),
                 (unsigned long long)table_addr);
        rdev->gart.ready = true;
        return 0;
}
void rv370_pcie_gart_disable(struct radeon_device *rdev)
{
        u32 tmp;

        WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, 0);
        WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, 0);
        WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
        WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
        tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
        WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
        radeon_gart_table_vram_unpin(rdev);
}
void rv370_pcie_gart_fini(struct radeon_device *rdev)
{
        radeon_gart_fini(rdev);
        rv370_pcie_gart_disable(rdev);
        radeon_gart_table_vram_free(rdev);
}
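/*
 * Emit a fence: flush the destination and Z caches, wait for the GPU to go
 * idle and clean, invalidate the HDP read cache through the ring (see the
 * HOST_PATH_CNTL erratum above), then write the sequence number to the
 * fence scratch register and fire the software interrupt.
 */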
void r300_fence_ring_emit(struct radeon_device *rdev,
                          struct radeon_fence *fence)
{
        /* Whoever calls radeon_fence_emit should call ring_lock and ask
         * for enough space (today the callers are the IB scheduler and
         * buffer moves). */
        /* Write SC register so SC & US assert idle */
        radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_TL, 0));
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_BR, 0));
        radeon_ring_write(rdev, 0);
        /* Flush 3D cache */
        radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
        radeon_ring_write(rdev, R300_RB3D_DC_FLUSH);
        radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
        radeon_ring_write(rdev, R300_ZC_FLUSH);
        /* Wait until IDLE & CLEAN */
        radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
        radeon_ring_write(rdev, (RADEON_WAIT_3D_IDLECLEAN |
                                 RADEON_WAIT_2D_IDLECLEAN |
                                 RADEON_WAIT_DMA_GUI_IDLE));
        radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
        radeon_ring_write(rdev, rdev->config.r300.hdp_cntl |
                                RADEON_HDP_READ_BUFFER_INVALIDATE);
        radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
        radeon_ring_write(rdev, rdev->config.r300.hdp_cntl);
        /* Emit fence sequence & fire IRQ */
        radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
        radeon_ring_write(rdev, fence->seq);
        radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
        radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
}
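/*
 * Initial state emitted when the CP ring is started: pipe configuration,
 * cache flushes, multisample positions and rasterizer defaults.
 */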
void r300_ring_start(struct radeon_device *rdev)
{
        unsigned gb_tile_config;
        int r;

        /* Sub pixel 1/12 so we can have 4K rendering according to doc */
        gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
        switch (rdev->num_gb_pipes) {
        case 2:
                gb_tile_config |= R300_PIPE_COUNT_R300;
                break;
        case 3:
                gb_tile_config |= R300_PIPE_COUNT_R420_3P;
                break;
        case 4:
                gb_tile_config |= R300_PIPE_COUNT_R420;
                break;
        case 1:
        default:
                gb_tile_config |= R300_PIPE_COUNT_RV350;
                break;
        }

        r = radeon_ring_lock(rdev, 64);
        if (r) {
                return;
        }
        radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
        radeon_ring_write(rdev,
                          RADEON_ISYNC_ANY2D_IDLE3D |
                          RADEON_ISYNC_ANY3D_IDLE2D |
                          RADEON_ISYNC_WAIT_IDLEGUI |
                          RADEON_ISYNC_CPSCRATCH_IDLEGUI);
        radeon_ring_write(rdev, PACKET0(R300_GB_TILE_CONFIG, 0));
        radeon_ring_write(rdev, gb_tile_config);
        radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
        radeon_ring_write(rdev,
                          RADEON_WAIT_2D_IDLECLEAN |
                          RADEON_WAIT_3D_IDLECLEAN);
        radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0));
        radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG);
        radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0));
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0));
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
        radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
        radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
        radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
        radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
        radeon_ring_write(rdev,
                          RADEON_WAIT_2D_IDLECLEAN |
                          RADEON_WAIT_3D_IDLECLEAN);
        radeon_ring_write(rdev, PACKET0(R300_GB_AA_CONFIG, 0));
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
        radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
        radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
        radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
        radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS0, 0));
        radeon_ring_write(rdev,
                          ((6 << R300_MS_X0_SHIFT) |
                           (6 << R300_MS_Y0_SHIFT) |
                           (6 << R300_MS_X1_SHIFT) |
                           (6 << R300_MS_Y1_SHIFT) |
                           (6 << R300_MS_X2_SHIFT) |
                           (6 << R300_MS_Y2_SHIFT) |
                           (6 << R300_MSBD0_Y_SHIFT) |
                           (6 << R300_MSBD0_X_SHIFT)));
        radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS1, 0));
        radeon_ring_write(rdev,
                          ((6 << R300_MS_X3_SHIFT) |
                           (6 << R300_MS_Y3_SHIFT) |
                           (6 << R300_MS_X4_SHIFT) |
                           (6 << R300_MS_Y4_SHIFT) |
                           (6 << R300_MS_X5_SHIFT) |
                           (6 << R300_MS_Y5_SHIFT) |
                           (6 << R300_MSBD1_SHIFT)));
        radeon_ring_write(rdev, PACKET0(R300_GA_ENHANCE, 0));
        radeon_ring_write(rdev, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
        radeon_ring_write(rdev, PACKET0(R300_GA_POLY_MODE, 0));
        radeon_ring_write(rdev,
                          R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE);
        radeon_ring_write(rdev, PACKET0(R300_GA_ROUND_MODE, 0));
        radeon_ring_write(rdev,
                          R300_GEOMETRY_ROUND_NEAREST |
                          R300_COLOR_ROUND_NEAREST);
        radeon_ring_unlock_commit(rdev);
}
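/*
 * Record known errata; rev A11 of the original R300 needs the
 * CHIP_ERRATA_R300_CG workaround (presumably clock-gating related).
 */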
void r300_errata(struct radeon_device *rdev)
{
        rdev->pll_errata = 0;

        if (rdev->family == CHIP_R300 &&
            (RREG32(RADEON_CONFIG_CNTL) & RADEON_CFG_ATI_REV_ID_MASK) == RADEON_CFG_ATI_REV_A11) {
                rdev->pll_errata |= CHIP_ERRATA_R300_CG;
        }
}
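/*
 * Poll RADEON_MC_STATUS until the memory controller reports idle, giving
 * up after rdev->usec_timeout iterations.
 */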
int r300_mc_wait_for_idle(struct radeon_device *rdev)
{
        unsigned i;
        uint32_t tmp;

        for (i = 0; i < rdev->usec_timeout; i++) {
                /* read MC_STATUS */
                tmp = RREG32(RADEON_MC_STATUS);
                if (tmp & R300_MC_IDLE) {
                        return 0;
                }
                DRM_UDELAY(1);
        }
        return -1;
}
void r300_gpu_init(struct radeon_device *rdev)
{
        uint32_t gb_tile_config, tmp;

        if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) ||
            (rdev->family == CHIP_R350 && rdev->pdev->device != 0x4148)) {
                /* r300,r350 */
                rdev->num_gb_pipes = 2;
        } else {
                /* rv350,rv370,rv380,r300 AD, r350 AH */
                rdev->num_gb_pipes = 1;
        }
        rdev->num_z_pipes = 1;
        gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
        switch (rdev->num_gb_pipes) {
        case 2:
                gb_tile_config |= R300_PIPE_COUNT_R300;
                break;
        case 3:
                gb_tile_config |= R300_PIPE_COUNT_R420_3P;
                break;
        case 4:
                gb_tile_config |= R300_PIPE_COUNT_R420;
                break;
        case 1:
        default:
                gb_tile_config |= R300_PIPE_COUNT_RV350;
                break;
        }
        WREG32(R300_GB_TILE_CONFIG, gb_tile_config);

        if (r100_gui_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait GUI idle while "
                       "programming pipes. Bad things might happen.\n");
        }

        tmp = RREG32(R300_DST_PIPE_CONFIG);
        WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG);

        WREG32(R300_RB2D_DSTCACHE_MODE,
               R300_DC_AUTOFLUSH_ENABLE |
               R300_DC_DC_DISABLE_IGNORE_PE);

        if (r100_gui_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait GUI idle while "
                       "programming pipes. Bad things might happen.\n");
        }
        if (r300_mc_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait MC idle while "
                       "programming pipes. Bad things might happen.\n");
        }
        DRM_INFO("radeon: %d quad pipes, %d Z pipes initialized.\n",
                 rdev->num_gb_pipes, rdev->num_z_pipes);
}
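/*
 * Lockup detection: if the GUI is idle just refresh the lockup tracking
 * state; otherwise nudge the CP with two NOP dwords and let the common
 * r100 helper decide whether the CP has stopped making progress.
 */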
bool r300_gpu_is_lockup(struct radeon_device *rdev)
{
        u32 rbbm_status;
        int r;

        rbbm_status = RREG32(R_000E40_RBBM_STATUS);
        if (!G_000E40_GUI_ACTIVE(rbbm_status)) {
                r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
                return false;
        }
        /* force CP activities */
        r = radeon_ring_lock(rdev, 2);
        if (!r) {
                /* PACKET2 NOP */
                radeon_ring_write(rdev, 0x80000000);
                radeon_ring_write(rdev, 0x80000000);
                radeon_ring_unlock_commit(rdev);
        }
        rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
        return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
}
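/*
 * Soft-reset the VAP/GA blocks (and, more riskily, the CP) with bus
 * mastering disabled, then restore PCI state and check whether the GPU
 * actually went idle.
 */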
int r300_asic_reset(struct radeon_device *rdev)
{
        struct r100_mc_save save;
        u32 status, tmp;
        int ret = 0;

        status = RREG32(R_000E40_RBBM_STATUS);
        if (!G_000E40_GUI_ACTIVE(status)) {
                return 0;
        }
        r100_mc_stop(rdev, &save);
        status = RREG32(R_000E40_RBBM_STATUS);
        dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
        /* stop CP */
        WREG32(RADEON_CP_CSQ_CNTL, 0);
        tmp = RREG32(RADEON_CP_RB_CNTL);
        WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA);
        WREG32(RADEON_CP_RB_RPTR_WR, 0);
        WREG32(RADEON_CP_RB_WPTR, 0);
        WREG32(RADEON_CP_RB_CNTL, tmp);
        /* save PCI state */
        pci_save_state(rdev->pdev);
        /* disable bus mastering */
        r100_bm_disable(rdev);
        WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) |
                                         S_0000F0_SOFT_RESET_GA(1));
        RREG32(R_0000F0_RBBM_SOFT_RESET);
        mdelay(500);
        WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
        mdelay(1);
        status = RREG32(R_000E40_RBBM_STATUS);
        dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
        /* Resetting the CP seems to be problematic: sometimes it ends up
         * hard-locking the computer, but it's necessary for a successful
         * reset. More testing and tinkering is needed on R3XX/R4XX to find
         * a reliable solution (if there is any).
         */
        WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1));
        RREG32(R_0000F0_RBBM_SOFT_RESET);
        mdelay(500);
        WREG32(R_0000F0_RBBM_SOFT_RESET, 0);
        mdelay(1);
        status = RREG32(R_000E40_RBBM_STATUS);
        dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
        /* restore PCI & busmastering */
        pci_restore_state(rdev->pdev);
        r100_enable_bm(rdev);
        /* Check if GPU is idle */
        if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
                dev_err(rdev->dev, "failed to reset GPU\n");
                rdev->gpu_lockup = true;
                ret = -1;
        } else
                dev_info(rdev->dev, "GPU reset succeeded\n");
        r100_mc_resume(rdev, &save);
        return ret;
}
/*
 * r300,r350,rv350,rv380 VRAM info
 */
void r300_mc_init(struct radeon_device *rdev)
{
        u64 base;
        u32 tmp;

        /* DDR for all cards after R300 & IGP */
        rdev->mc.vram_is_ddr = true;
        tmp = RREG32(RADEON_MEM_CNTL);
        tmp &= R300_MEM_NUM_CHANNELS_MASK;
        switch (tmp) {
        case 0: rdev->mc.vram_width = 64; break;
        case 1: rdev->mc.vram_width = 128; break;
        case 2: rdev->mc.vram_width = 256; break;
        default: rdev->mc.vram_width = 128; break;
        }
        r100_vram_init_sizes(rdev);
        base = rdev->mc.aper_base;
        if (rdev->flags & RADEON_IS_IGP)
                base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
        radeon_vram_location(rdev, &rdev->mc, base);
        rdev->mc.gtt_base_align = 0;
        if (!(rdev->flags & RADEON_IS_AGP))
                radeon_gtt_location(rdev, &rdev->mc);
        radeon_update_bandwidth_info(rdev);
}
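/*
 * Reconfigure the PCIE link width: map the requested lane count onto the
 * LC_LINK_WIDTH field, skip the update if the link is already at that
 * width, then trigger RECONFIG_NOW and wait for completion.
 */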
void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
{
        uint32_t link_width_cntl, mask;

        if (rdev->flags & RADEON_IS_IGP)
                return;

        if (!(rdev->flags & RADEON_IS_PCIE))
                return;

        /* FIXME wait for idle */

        switch (lanes) {
        case 0:
                mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
                break;
        case 1:
                mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
                break;
        case 2:
                mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
                break;
        case 4:
                mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
                break;
        case 8:
                mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
                break;
        case 12:
                mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
                break;
        case 16:
        default:
                mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
                break;
        }

        link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

        if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
            (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
                return;

        link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
                             RADEON_PCIE_LC_RECONFIG_NOW |
                             RADEON_PCIE_LC_RECONFIG_LATER |
                             RADEON_PCIE_LC_SHORT_RECONFIG_EN);
        link_width_cntl |= mask;
        WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
        WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
                                                     RADEON_PCIE_LC_RECONFIG_NOW));

        /* wait for lane set to complete */
        link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
        while (link_width_cntl == 0xffffffff)
                link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
}
int rv370_get_pcie_lanes(struct radeon_device *rdev)
{
        u32 link_width_cntl;

        if (rdev->flags & RADEON_IS_IGP)
                return 0;

        if (!(rdev->flags & RADEON_IS_PCIE))
                return 0;

        /* FIXME wait for idle */

        link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

        switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
        case RADEON_PCIE_LC_LINK_WIDTH_X0:
                return 0;
        case RADEON_PCIE_LC_LINK_WIDTH_X1:
                return 1;
        case RADEON_PCIE_LC_LINK_WIDTH_X2:
                return 2;
        case RADEON_PCIE_LC_LINK_WIDTH_X4:
                return 4;
        case RADEON_PCIE_LC_LINK_WIDTH_X8:
                return 8;
        case RADEON_PCIE_LC_LINK_WIDTH_X16:
        default:
                return 16;
        }
}
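/*
 * debugfs support: dump the PCIE GART control, table base and aperture
 * registers.
 */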
#if defined(CONFIG_DEBUG_FS)
static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;
        uint32_t tmp;

        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
        seq_printf(m, "PCIE_TX_GART_CNTL 0x%08x\n", tmp);
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_BASE);
        seq_printf(m, "PCIE_TX_GART_BASE 0x%08x\n", tmp);
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_LO);
        seq_printf(m, "PCIE_TX_GART_START_LO 0x%08x\n", tmp);
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_HI);
        seq_printf(m, "PCIE_TX_GART_START_HI 0x%08x\n", tmp);
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_LO);
        seq_printf(m, "PCIE_TX_GART_END_LO 0x%08x\n", tmp);
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_HI);
        seq_printf(m, "PCIE_TX_GART_END_HI 0x%08x\n", tmp);
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_ERROR);
        seq_printf(m, "PCIE_TX_GART_ERROR 0x%08x\n", tmp);
        return 0;
}

static struct drm_info_list rv370_pcie_gart_info_list[] = {
        {"rv370_pcie_gart_info", rv370_debugfs_pcie_gart_info, 0, NULL},
};
#endif

static int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
        return radeon_debugfs_add_files(rdev, rv370_pcie_gart_info_list, 1);
#else
        return 0;
#endif
}
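/*
 * CS checker for PACKET0 register writes: patch buffer offsets with the
 * relocated GPU addresses and mirror color/depth/texture state into the
 * r100_cs_track structure so draws can be validated against the bound
 * buffers. Writes to registers that are neither handled here nor
 * whitelisted end up in the fail path at the bottom.
 */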
static int r300_packet0_check(struct radeon_cs_parser *p,
                              struct radeon_cs_packet *pkt,
                              unsigned idx, unsigned reg)
{
        struct radeon_cs_reloc *reloc;
        struct r100_cs_track *track;
        volatile uint32_t *ib;
        uint32_t tmp, tile_flags = 0;
        unsigned i;
        int r;
        u32 idx_value;

        ib = p->ib->ptr;
        track = (struct r100_cs_track *)p->track;
        idx_value = radeon_get_ib_value(p, idx);

        switch (reg) {
        case AVIVO_D1MODE_VLINE_START_END:
        case RADEON_CRTC_GUI_TRIG_VLINE:
                r = r100_cs_packet_parse_vline(p);
                if (r) {
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                                  idx, reg);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
                break;
        case RADEON_DST_PITCH_OFFSET:
        case RADEON_SRC_PITCH_OFFSET:
                r = r100_reloc_pitch_offset(p, pkt, idx, reg);
                if (r)
                        return r;
                break;
        case R300_RB3D_COLOROFFSET0:
        case R300_RB3D_COLOROFFSET1:
        case R300_RB3D_COLOROFFSET2:
        case R300_RB3D_COLOROFFSET3:
                i = (reg - R300_RB3D_COLOROFFSET0) >> 2;
                r = r100_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                                  idx, reg);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
                track->cb[i].robj = reloc->robj;
                track->cb[i].offset = idx_value;
                track->cb_dirty = true;
                ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                break;
        case R300_ZB_DEPTHOFFSET:
                r = r100_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                                  idx, reg);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
                track->zb.robj = reloc->robj;
                track->zb.offset = idx_value;
                track->zb_dirty = true;
                ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                break;
        case R300_TX_OFFSET_0:
        case R300_TX_OFFSET_0+4:
        case R300_TX_OFFSET_0+8:
        case R300_TX_OFFSET_0+12:
        case R300_TX_OFFSET_0+16:
        case R300_TX_OFFSET_0+20:
        case R300_TX_OFFSET_0+24:
        case R300_TX_OFFSET_0+28:
        case R300_TX_OFFSET_0+32:
        case R300_TX_OFFSET_0+36:
        case R300_TX_OFFSET_0+40:
        case R300_TX_OFFSET_0+44:
        case R300_TX_OFFSET_0+48:
        case R300_TX_OFFSET_0+52:
        case R300_TX_OFFSET_0+56:
        case R300_TX_OFFSET_0+60:
                i = (reg - R300_TX_OFFSET_0) >> 2;
                r = r100_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                                  idx, reg);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }

                if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
                        tile_flags |= R300_TXO_MACRO_TILE;
                if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
                        tile_flags |= R300_TXO_MICRO_TILE;
                else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
                        tile_flags |= R300_TXO_MICRO_TILE_SQUARE;

                tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
                tmp |= tile_flags;
                ib[idx] = tmp;
                track->textures[i].robj = reloc->robj;
                track->tex_dirty = true;
                break;
        /* Tracked registers */
        case 0x2084:
                /* VAP_VF_CNTL */
                track->vap_vf_cntl = idx_value;
                break;
        case 0x20b4:
                /* VAP_VTX_SIZE */
                track->vtx_size = idx_value & 0x7F;
                break;
        case 0x2134:
                /* VAP_VF_MAX_VTX_INDX */
                track->max_indx = idx_value & 0x00FFFFFFUL;
                break;
        case 0x2140:
                /* VAP_ALT_NUM_VERTICES - only valid on r500 */
                if (p->rdev->family < CHIP_RV515)
                        goto fail;
                track->vap_alt_nverts = idx_value & 0xFFFFFF;
                break;
        case 0x43E4:
                /* SC_SCISSOR1 */
                track->maxy = ((idx_value >> 13) & 0x1FFF) + 1;
                if (p->rdev->family < CHIP_RV515) {
                        track->maxy -= 1440;
                }
                track->cb_dirty = true;
                track->zb_dirty = true;
                break;
        case 0x4E00:
                /* RB3D_CCTL */
                if ((idx_value & (1 << 10)) && /* CMASK_ENABLE */
                    p->rdev->cmask_filp != p->filp) {
                        DRM_ERROR("Invalid RB3D_CCTL: Cannot enable CMASK.\n");
                        return -EINVAL;
                }
                track->num_cb = ((idx_value >> 5) & 0x3) + 1;
                track->cb_dirty = true;
                break;
        case 0x4E38:
        case 0x4E3C:
        case 0x4E40:
        case 0x4E44:
                /* RB3D_COLORPITCH0 */
                /* RB3D_COLORPITCH1 */
                /* RB3D_COLORPITCH2 */
                /* RB3D_COLORPITCH3 */
                r = r100_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                                  idx, reg);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }

                if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
                        tile_flags |= R300_COLOR_TILE_ENABLE;
                if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
                        tile_flags |= R300_COLOR_MICROTILE_ENABLE;
                else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
                        tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;

                tmp = idx_value & ~(0x7 << 16);
                tmp |= tile_flags;
                ib[idx] = tmp;
                i = (reg - 0x4E38) >> 2;
                track->cb[i].pitch = idx_value & 0x3FFE;
                switch (((idx_value >> 21) & 0xF)) {
                case 9:
                case 11:
                case 12:
                        track->cb[i].cpp = 1;
                        break;
                case 3:
                case 4:
                case 13:
                case 15:
                        track->cb[i].cpp = 2;
                        break;
                case 5:
                        if (p->rdev->family < CHIP_RV515) {
                                DRM_ERROR("Invalid color buffer format (%d)!\n",
                                          ((idx_value >> 21) & 0xF));
                                return -EINVAL;
                        }
                        /* Pass through. */
                case 6:
                        track->cb[i].cpp = 4;
                        break;
                case 10:
                        track->cb[i].cpp = 8;
                        break;
                case 7:
                        track->cb[i].cpp = 16;
                        break;
                default:
                        DRM_ERROR("Invalid color buffer format (%d) !\n",
                                  ((idx_value >> 21) & 0xF));
                        return -EINVAL;
                }
                track->cb_dirty = true;
                break;
        case 0x4F00:
                /* ZB_CNTL */
                if (idx_value & 2) {
                        track->z_enabled = true;
                } else {
                        track->z_enabled = false;
                }
                track->zb_dirty = true;
                break;
        case 0x4F10:
                /* ZB_FORMAT */
                switch ((idx_value & 0xF)) {
                case 0:
                case 1:
                        track->zb.cpp = 2;
                        break;
                case 2:
                        track->zb.cpp = 4;
                        break;
                default:
                        DRM_ERROR("Invalid z buffer format (%d) !\n",
                                  (idx_value & 0xF));
                        return -EINVAL;
                }
                track->zb_dirty = true;
                break;
        case 0x4F24:
                /* ZB_DEPTHPITCH */
                r = r100_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                                  idx, reg);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }

                if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
                        tile_flags |= R300_DEPTHMACROTILE_ENABLE;
                if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
                        tile_flags |= R300_DEPTHMICROTILE_TILED;
                else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
                        tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;

                tmp = idx_value & ~(0x7 << 16);
                tmp |= tile_flags;
                ib[idx] = tmp;
                track->zb.pitch = idx_value & 0x3FFC;
                track->zb_dirty = true;
                break;
        case 0x4104:
                /* TX_ENABLE */
                for (i = 0; i < 16; i++) {
                        bool enabled;

                        enabled = !!(idx_value & (1 << i));
                        track->textures[i].enabled = enabled;
                }
                track->tex_dirty = true;
                break;
        case 0x44C0:
        case 0x44C4:
        case 0x44C8:
        case 0x44CC:
        case 0x44D0:
        case 0x44D4:
        case 0x44D8:
        case 0x44DC:
        case 0x44E0:
        case 0x44E4:
        case 0x44E8:
        case 0x44EC:
        case 0x44F0:
        case 0x44F4:
        case 0x44F8:
        case 0x44FC:
                /* TX_FORMAT1_[0-15] */
                i = (reg - 0x44C0) >> 2;
                tmp = (idx_value >> 25) & 0x3;
                track->textures[i].tex_coord_type = tmp;
                switch ((idx_value & 0x1F)) {
                case R300_TX_FORMAT_X8:
                case R300_TX_FORMAT_Y4X4:
                case R300_TX_FORMAT_Z3Y3X2:
                        track->textures[i].cpp = 1;
                        track->textures[i].compress_format = R100_TRACK_COMP_NONE;
                        break;
                case R300_TX_FORMAT_X16:
                case R300_TX_FORMAT_FL_I16:
                case R300_TX_FORMAT_Y8X8:
                case R300_TX_FORMAT_Z5Y6X5:
                case R300_TX_FORMAT_Z6Y5X5:
                case R300_TX_FORMAT_W4Z4Y4X4:
                case R300_TX_FORMAT_W1Z5Y5X5:
                case R300_TX_FORMAT_D3DMFT_CxV8U8:
                case R300_TX_FORMAT_B8G8_B8G8:
                case R300_TX_FORMAT_G8R8_G8B8:
                        track->textures[i].cpp = 2;
                        track->textures[i].compress_format = R100_TRACK_COMP_NONE;
                        break;
                case R300_TX_FORMAT_Y16X16:
                case R300_TX_FORMAT_FL_I16A16:
                case R300_TX_FORMAT_Z11Y11X10:
                case R300_TX_FORMAT_Z10Y11X11:
                case R300_TX_FORMAT_W8Z8Y8X8:
                case R300_TX_FORMAT_W2Z10Y10X10:
                case 0x17:
                case R300_TX_FORMAT_FL_I32:
                case 0x1e:
                        track->textures[i].cpp = 4;
                        track->textures[i].compress_format = R100_TRACK_COMP_NONE;
                        break;
                case R300_TX_FORMAT_W16Z16Y16X16:
                case R300_TX_FORMAT_FL_R16G16B16A16:
                case R300_TX_FORMAT_FL_I32A32:
                        track->textures[i].cpp = 8;
                        track->textures[i].compress_format = R100_TRACK_COMP_NONE;
                        break;
                case R300_TX_FORMAT_FL_R32G32B32A32:
                        track->textures[i].cpp = 16;
                        track->textures[i].compress_format = R100_TRACK_COMP_NONE;
                        break;
                case R300_TX_FORMAT_DXT1:
                        track->textures[i].cpp = 1;
                        track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
                        break;
                case R300_TX_FORMAT_ATI2N:
                        if (p->rdev->family < CHIP_R420) {
                                DRM_ERROR("Invalid texture format %u\n",
                                          (idx_value & 0x1F));
                                return -EINVAL;
                        }
                        /* The same rules apply as for DXT3/5. */
                        /* Pass through. */
                case R300_TX_FORMAT_DXT3:
                case R300_TX_FORMAT_DXT5:
                        track->textures[i].cpp = 1;
                        track->textures[i].compress_format = R100_TRACK_COMP_DXT35;
                        break;
                default:
                        DRM_ERROR("Invalid texture format %u\n",
                                  (idx_value & 0x1F));
                        return -EINVAL;
                }
                track->tex_dirty = true;
                break;
        case 0x4400:
        case 0x4404:
        case 0x4408:
        case 0x440C:
        case 0x4410:
        case 0x4414:
        case 0x4418:
        case 0x441C:
        case 0x4420:
        case 0x4424:
        case 0x4428:
        case 0x442C:
        case 0x4430:
        case 0x4434:
        case 0x4438:
        case 0x443C:
                /* TX_FILTER0_[0-15] */
                i = (reg - 0x4400) >> 2;
                tmp = idx_value & 0x7;
                if (tmp == 2 || tmp == 4 || tmp == 6) {
                        track->textures[i].roundup_w = false;
                }
                tmp = (idx_value >> 3) & 0x7;
                if (tmp == 2 || tmp == 4 || tmp == 6) {
                        track->textures[i].roundup_h = false;
                }
                track->tex_dirty = true;
                break;
        case 0x4500:
        case 0x4504:
        case 0x4508:
        case 0x450C:
        case 0x4510:
        case 0x4514:
        case 0x4518:
        case 0x451C:
        case 0x4520:
        case 0x4524:
        case 0x4528:
        case 0x452C:
        case 0x4530:
        case 0x4534:
        case 0x4538:
        case 0x453C:
                /* TX_FORMAT2_[0-15] */
                i = (reg - 0x4500) >> 2;
                tmp = idx_value & 0x3FFF;
                track->textures[i].pitch = tmp + 1;
                if (p->rdev->family >= CHIP_RV515) {
                        tmp = ((idx_value >> 15) & 1) << 11;
                        track->textures[i].width_11 = tmp;
                        tmp = ((idx_value >> 16) & 1) << 11;
                        track->textures[i].height_11 = tmp;

                        /* ATI1N */
                        if (idx_value & (1 << 14)) {
                                /* The same rules apply as for DXT1. */
                                track->textures[i].compress_format =
                                        R100_TRACK_COMP_DXT1;
                        }
                } else if (idx_value & (1 << 14)) {
                        DRM_ERROR("Forbidden bit TXFORMAT_MSB\n");
                        return -EINVAL;
                }
                track->tex_dirty = true;
                break;
        case 0x4480:
        case 0x4484:
        case 0x4488:
        case 0x448C:
        case 0x4490:
        case 0x4494:
        case 0x4498:
        case 0x449C:
        case 0x44A0:
        case 0x44A4:
        case 0x44A8:
        case 0x44AC:
        case 0x44B0:
        case 0x44B4:
        case 0x44B8:
        case 0x44BC:
                /* TX_FORMAT0_[0-15] */
                i = (reg - 0x4480) >> 2;
                tmp = idx_value & 0x7FF;
                track->textures[i].width = tmp + 1;
                tmp = (idx_value >> 11) & 0x7FF;
                track->textures[i].height = tmp + 1;
                tmp = (idx_value >> 26) & 0xF;
                track->textures[i].num_levels = tmp;
                tmp = idx_value & (1 << 31);
                track->textures[i].use_pitch = !!tmp;
                tmp = (idx_value >> 22) & 0xF;
                track->textures[i].txdepth = tmp;
                track->tex_dirty = true;
                break;
        case R300_ZB_ZPASS_ADDR:
                r = r100_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                                  idx, reg);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
                ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                break;
        case 0x4e0c:
                /* RB3D_COLOR_CHANNEL_MASK */
                track->color_channel_mask = idx_value;
                track->cb_dirty = true;
                break;
        case 0x43a4:
                /* SC_HYPERZ_EN */
                /* r300c emits this register - we need to disable hyperz for it
                 * without complaining */
                if (p->rdev->hyperz_filp != p->filp) {
                        if (idx_value & 0x1)
                                ib[idx] = idx_value & ~1;
                }
                break;
        case 0x4f1c:
                /* ZB_BW_CNTL */
                track->zb_cb_clear = !!(idx_value & (1 << 5));
                track->cb_dirty = true;
                track->zb_dirty = true;
                if (p->rdev->hyperz_filp != p->filp) {
                        if (idx_value & (R300_HIZ_ENABLE |
                                         R300_RD_COMP_ENABLE |
                                         R300_WR_COMP_ENABLE |
                                         R300_FAST_FILL_ENABLE))
                                goto fail;
                }
                break;
        case 0x4e04:
                /* RB3D_BLENDCNTL */
                track->blend_read_enable = !!(idx_value & (1 << 2));
                track->cb_dirty = true;
                break;
        case R300_RB3D_AARESOLVE_OFFSET:
                r = r100_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                                  idx, reg);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
                track->aa.robj = reloc->robj;
                track->aa.offset = idx_value;
                track->aa_dirty = true;
                ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
                break;
        case R300_RB3D_AARESOLVE_PITCH:
                track->aa.pitch = idx_value & 0x3FFE;
                track->aa_dirty = true;
                break;
        case R300_RB3D_AARESOLVE_CTL:
                track->aaresolve = idx_value & 0x1;
                track->aa_dirty = true;
                break;
        case 0x4f30: /* ZB_MASK_OFFSET */
        case 0x4f34: /* ZB_ZMASK_PITCH */
        case 0x4f44: /* ZB_HIZ_OFFSET */
        case 0x4f54: /* ZB_HIZ_PITCH */
                if (idx_value && (p->rdev->hyperz_filp != p->filp))
                        goto fail;
                break;
        case 0x4028:
                if (idx_value && (p->rdev->hyperz_filp != p->filp))
                        goto fail;
                /* GB_Z_PEQ_CONFIG */
                if (p->rdev->family >= CHIP_RV350)
                        break;
                goto fail;
        case 0x4be8:
                /* valid register only on RV530 */
                if (p->rdev->family == CHIP_RV530)
                        break;
                /* fallthrough do not move */
        default:
                goto fail;
        }
        return 0;
fail:
        printk(KERN_ERR "Forbidden register 0x%04X in cs at %d (val=%08x)\n",
               reg, idx, idx_value);
        return -EINVAL;
}
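/*
 * CS checker for PACKET3: relocate index buffers, capture VAP_VF_CNTL and
 * run the full state check before each draw, and restrict the hyperz and
 * cmask clear packets to the file that owns them.
 */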
static int r300_packet3_check(struct radeon_cs_parser *p,
                              struct radeon_cs_packet *pkt)
{
        struct radeon_cs_reloc *reloc;
        struct r100_cs_track *track;
        volatile uint32_t *ib;
        unsigned idx;
        int r;

        ib = p->ib->ptr;
        idx = pkt->idx + 1;
        track = (struct r100_cs_track *)p->track;
        switch (pkt->opcode) {
        case PACKET3_3D_LOAD_VBPNTR:
                r = r100_packet3_load_vbpntr(p, pkt, idx);
                if (r)
                        return r;
                break;
        case PACKET3_INDX_BUFFER:
                r = r100_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
                ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
                r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
                if (r) {
                        return r;
                }
                break;
        case PACKET3_3D_DRAW_IMMD:
                /* Number of dwords is vtx_size * (num_vertices - 1).
                 * PRIM_WALK must be equal to 3, i.e. vertex data embedded
                 * in the command stream. */
                if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
                        DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
                        return -EINVAL;
                }
                track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
                track->immd_dwords = pkt->count - 1;
                r = r100_cs_track_check(p->rdev, track);
                if (r) {
                        return r;
                }
                break;
        case PACKET3_3D_DRAW_IMMD_2:
                /* Number of dwords is vtx_size * (num_vertices - 1).
                 * PRIM_WALK must be equal to 3, i.e. vertex data embedded
                 * in the command stream. */
                if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
                        DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
                        return -EINVAL;
                }
                track->vap_vf_cntl = radeon_get_ib_value(p, idx);
                track->immd_dwords = pkt->count;
                r = r100_cs_track_check(p->rdev, track);
                if (r) {
                        return r;
                }
                break;
        case PACKET3_3D_DRAW_VBUF:
                track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
                r = r100_cs_track_check(p->rdev, track);
                if (r) {
                        return r;
                }
                break;
        case PACKET3_3D_DRAW_VBUF_2:
                track->vap_vf_cntl = radeon_get_ib_value(p, idx);
                r = r100_cs_track_check(p->rdev, track);
                if (r) {
                        return r;
                }
                break;
        case PACKET3_3D_DRAW_INDX:
                track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
                r = r100_cs_track_check(p->rdev, track);
                if (r) {
                        return r;
                }
                break;
        case PACKET3_3D_DRAW_INDX_2:
                track->vap_vf_cntl = radeon_get_ib_value(p, idx);
                r = r100_cs_track_check(p->rdev, track);
                if (r) {
                        return r;
                }
                break;
        case PACKET3_3D_CLEAR_HIZ:
        case PACKET3_3D_CLEAR_ZMASK:
                if (p->rdev->hyperz_filp != p->filp)
                        return -EINVAL;
                break;
        case PACKET3_3D_CLEAR_CMASK:
                if (p->rdev->cmask_filp != p->filp)
                        return -EINVAL;
                break;
        case PACKET3_NOP:
                break;
        default:
                DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
                return -EINVAL;
        }
        return 0;
}
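/*
 * Top-level CS parse loop: walk every packet in the IB chunk, sending
 * PACKET0s through the register checker and PACKET3s through
 * r300_packet3_check().
 */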
int r300_cs_parse(struct radeon_cs_parser *p)
{
        struct radeon_cs_packet pkt;
        struct r100_cs_track *track;
        int r;

        track = kzalloc(sizeof(*track), GFP_KERNEL);
        if (track == NULL)
                return -ENOMEM;
        r100_cs_track_clear(p->rdev, track);
        p->track = track;
        do {
                r = r100_cs_packet_parse(p, &pkt, p->idx);
                if (r) {
                        return r;
                }
                p->idx += pkt.count + 2;
                switch (pkt.type) {
                case PACKET_TYPE0:
                        r = r100_cs_parse_packet0(p, &pkt,
                                                  p->rdev->config.r300.reg_safe_bm,
                                                  p->rdev->config.r300.reg_safe_bm_size,
                                                  &r300_packet0_check);
                        break;
                case PACKET_TYPE2:
                        break;
                case PACKET_TYPE3:
                        r = r300_packet3_check(p, &pkt);
                        break;
                default:
                        DRM_ERROR("Unknown packet type %d !\n", pkt.type);
                        return -EINVAL;
                }
                if (r) {
                        return r;
                }
        } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
        return 0;
}
void r300_set_reg_safe(struct radeon_device *rdev)
{
        rdev->config.r300.reg_safe_bm = r300_reg_safe_bm;
        rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r300_reg_safe_bm);
}
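/*
 * Program the memory controller: stop all MC clients, set up (or disable)
 * the AGP aperture, wait for the MC to idle, then program the framebuffer
 * location.
 */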
void r300_mc_program(struct radeon_device *rdev)
{
        struct r100_mc_save save;
        int r;

        r = r100_debugfs_mc_info_init(rdev);
        if (r) {
                dev_err(rdev->dev, "Failed to create r100_mc debugfs file.\n");
        }

        /* Stops all mc clients */
        r100_mc_stop(rdev, &save);
        if (rdev->flags & RADEON_IS_AGP) {
                WREG32(R_00014C_MC_AGP_LOCATION,
                       S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) |
                       S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
                WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
                WREG32(R_00015C_AGP_BASE_2,
                       upper_32_bits(rdev->mc.agp_base) & 0xff);
        } else {
                WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF);
                WREG32(R_000170_AGP_BASE, 0);
                WREG32(R_00015C_AGP_BASE_2, 0);
        }
        /* Wait for mc idle */
        if (r300_mc_wait_for_idle(rdev))
                DRM_INFO("Failed to wait MC idle before programming MC.\n");
        /* Program the MC; it should be a 32-bit limited address space */
        WREG32(R_000148_MC_FB_LOCATION,
               S_000148_MC_FB_START(rdev->mc.vram_start >> 16) |
               S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16));
        r100_mc_resume(rdev, &save);
}
void r300_clock_startup(struct radeon_device *rdev)
{
        u32 tmp;

        if (radeon_dynclks != -1 && radeon_dynclks)
                radeon_legacy_set_clock_gating(rdev, 1);
        /* We need to force on some of the blocks */
        tmp = RREG32_PLL(R_00000D_SCLK_CNTL);
        tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1);
        if ((rdev->family == CHIP_RV350) || (rdev->family == CHIP_RV380))
                tmp |= S_00000D_FORCE_VAP(1);
        WREG32_PLL(R_00000D_SCLK_CNTL, tmp);
}
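/*
 * Bring the ASIC up: program the MC and clocks, configure the pipes,
 * enable the GART and bus mastering, then start the CP ring, writeback
 * and IB processing.
 */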
static int r300_startup(struct radeon_device *rdev)
{
        int r;

        /* set common regs */
        r100_set_common_regs(rdev);
        /* program mc */
        r300_mc_program(rdev);
        /* Resume clock */
        r300_clock_startup(rdev);
        /* Initialize GPU configuration (# pipes, ...) */
        r300_gpu_init(rdev);
        /* Initialize GART (initialize after TTM so we can allocate
         * memory through TTM but finalize after TTM) */
        if (rdev->flags & RADEON_IS_PCIE) {
                r = rv370_pcie_gart_enable(rdev);
                if (r)
                        return r;
        }

        if (rdev->family == CHIP_R300 ||
            rdev->family == CHIP_R350 ||
            rdev->family == CHIP_RV350)
                r100_enable_bm(rdev);

        if (rdev->flags & RADEON_IS_PCI) {
                r = r100_pci_gart_enable(rdev);
                if (r)
                        return r;
        }

        /* allocate wb buffer */
        r = radeon_wb_init(rdev);
        if (r)
                return r;

        /* Enable IRQ */
        r100_irq_set(rdev);
        rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
        /* 1M ring buffer */
        r = r100_cp_init(rdev, 1024 * 1024);
        if (r) {
                dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
                return r;
        }
        r = r100_ib_init(rdev);
        if (r) {
                dev_err(rdev->dev, "failed initializing IB (%d).\n", r);
                return r;
        }
        return 0;
}
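/*
 * Resume: the GART must be disabled before the card is re-posted,
 * otherwise the GPU could fetch through stale translations.
 */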
int r300_resume(struct radeon_device *rdev)
{
        /* Make sure GART is not working */
        if (rdev->flags & RADEON_IS_PCIE)
                rv370_pcie_gart_disable(rdev);
        if (rdev->flags & RADEON_IS_PCI)
                r100_pci_gart_disable(rdev);
        /* Resume clock before doing reset */
        r300_clock_startup(rdev);
        /* Reset gpu before posting otherwise ATOM will enter infinite loop */
        if (radeon_asic_reset(rdev)) {
                dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                         RREG32(R_000E40_RBBM_STATUS),
                         RREG32(R_0007C0_CP_STAT));
        }
        /* post */
        radeon_combios_asic_init(rdev->ddev);
        /* Resume clock after posting */
        r300_clock_startup(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);
        return r300_startup(rdev);
}
int r300_suspend(struct radeon_device *rdev)
{
        r100_cp_disable(rdev);
        radeon_wb_disable(rdev);
        r100_irq_disable(rdev);
        if (rdev->flags & RADEON_IS_PCIE)
                rv370_pcie_gart_disable(rdev);
        if (rdev->flags & RADEON_IS_PCI)
                r100_pci_gart_disable(rdev);
        return 0;
}
void r300_fini(struct radeon_device *rdev)
{
        r100_cp_fini(rdev);
        radeon_wb_fini(rdev);
        r100_ib_fini(rdev);
        radeon_gem_fini(rdev);
        if (rdev->flags & RADEON_IS_PCIE)
                rv370_pcie_gart_fini(rdev);
        if (rdev->flags & RADEON_IS_PCI)
                r100_pci_gart_fini(rdev);
        radeon_agp_fini(rdev);
        radeon_irq_kms_fini(rdev);
        radeon_fence_driver_fini(rdev);
        radeon_bo_fini(rdev);
        radeon_atombios_fini(rdev);
        kfree(rdev->bios);
        rdev->bios = NULL;
}
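/*
 * One-time init: BIOS, errata, clocks, AGP, memory controller, fence/IRQ
 * infrastructure, memory manager and GART, finishing with the first
 * r300_startup().
 */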
int r300_init(struct radeon_device *rdev)
{
        int r;

        /* Disable VGA */
        r100_vga_render_disable(rdev);
        /* Initialize scratch registers */
        radeon_scratch_init(rdev);
        /* Initialize surface registers */
        radeon_surface_init(rdev);
        /* TODO: disable VGA need to use VGA request */
        /* restore some registers to sane defaults */
        r100_restore_sanity(rdev);
        /* BIOS */
        if (!radeon_get_bios(rdev)) {
                if (ASIC_IS_AVIVO(rdev))
                        return -EINVAL;
        }
        if (rdev->is_atom_bios) {
                dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
                return -EINVAL;
        } else {
                r = radeon_combios_init(rdev);
                if (r)
                        return r;
        }
        /* Reset gpu before posting otherwise ATOM will enter infinite loop */
        if (radeon_asic_reset(rdev)) {
                dev_warn(rdev->dev,
                         "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
                         RREG32(R_000E40_RBBM_STATUS),
                         RREG32(R_0007C0_CP_STAT));
        }
        /* check if cards are posted or not */
        if (radeon_boot_test_post_card(rdev) == false)
                return -EINVAL;
        /* Set asic errata */
        r300_errata(rdev);
        /* Initialize clocks */
        radeon_get_clock_info(rdev->ddev);
        /* initialize AGP */
        if (rdev->flags & RADEON_IS_AGP) {
                r = radeon_agp_init(rdev);
                if (r) {
                        radeon_agp_disable(rdev);
                }
        }
        /* initialize memory controller */
        r300_mc_init(rdev);
        /* Fence driver */
        r = radeon_fence_driver_init(rdev);
        if (r)
                return r;
        r = radeon_irq_kms_init(rdev);
        if (r)
                return r;
        /* Memory manager */
        r = radeon_bo_init(rdev);
        if (r)
                return r;
        if (rdev->flags & RADEON_IS_PCIE) {
                r = rv370_pcie_gart_init(rdev);
                if (r)
                        return r;
        }
        if (rdev->flags & RADEON_IS_PCI) {
                r = r100_pci_gart_init(rdev);
                if (r)
                        return r;
        }
        r300_set_reg_safe(rdev);
        rdev->accel_working = true;
        r = r300_startup(rdev);
        if (r) {
                /* Something went wrong with the accel init, so stop accel */
                dev_err(rdev->dev, "Disabling GPU acceleration\n");
                r100_cp_fini(rdev);
                radeon_wb_fini(rdev);
                r100_ib_fini(rdev);
                radeon_irq_kms_fini(rdev);
                if (rdev->flags & RADEON_IS_PCIE)
                        rv370_pcie_gart_fini(rdev);
                if (rdev->flags & RADEON_IS_PCI)
                        r100_pci_gart_fini(rdev);
                radeon_agp_fini(rdev);
                rdev->accel_working = false;
        }
        return 0;
}