Commit | Line | Data |
---|---|---|
771fe6b9 JG |
1 | /* |
2 | * Copyright 2008 Advanced Micro Devices, Inc. | |
3 | * Copyright 2008 Red Hat Inc. | |
4 | * Copyright 2009 Jerome Glisse. | |
5 | * | |
6 | * Permission is hereby granted, free of charge, to any person obtaining a | |
7 | * copy of this software and associated documentation files (the "Software"), | |
8 | * to deal in the Software without restriction, including without limitation | |
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
10 | * and/or sell copies of the Software, and to permit persons to whom the | |
11 | * Software is furnished to do so, subject to the following conditions: | |
12 | * | |
13 | * The above copyright notice and this permission notice shall be included in | |
14 | * all copies or substantial portions of the Software. | |
15 | * | |
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR | |
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, | |
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR | |
22 | * OTHER DEALINGS IN THE SOFTWARE. | |
23 | * | |
24 | * Authors: Dave Airlie | |
25 | * Alex Deucher | |
26 | * Jerome Glisse | |
27 | */ | |
28 | #include <linux/seq_file.h> | |
29 | #include "drmP.h" | |
30 | #include "radeon_reg.h" | |
31 | #include "radeon.h" | |
9f022ddf | 32 | #include "atom.h" |
905b6822 | 33 | #include "r420d.h" |
771fe6b9 | 34 | |
771fe6b9 JG |
35 | int r420_mc_init(struct radeon_device *rdev) |
36 | { | |
37 | int r; | |
38 | ||
771fe6b9 JG |
39 | /* Setup GPU memory space */ |
40 | rdev->mc.vram_location = 0xFFFFFFFFUL; | |
41 | rdev->mc.gtt_location = 0xFFFFFFFFUL; | |
42 | if (rdev->flags & RADEON_IS_AGP) { | |
43 | r = radeon_agp_init(rdev); | |
44 | if (r) { | |
45 | printk(KERN_WARNING "[drm] Disabling AGP\n"); | |
46 | rdev->flags &= ~RADEON_IS_AGP; | |
47 | rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; | |
48 | } else { | |
49 | rdev->mc.gtt_location = rdev->mc.agp_base; | |
50 | } | |
51 | } | |
52 | r = radeon_mc_setup(rdev); | |
53 | if (r) { | |
54 | return r; | |
55 | } | |
771fe6b9 JG |
56 | return 0; |
57 | } | |
58 | ||
/*
 * r420_pipes_init() - configure the render (GB) pipes.
 * @rdev: radeon device
 *
 * Reads how many quad pipes the chip exposes, programs the pipe
 * enable mask accordingly, and records the quad-pipe and z-pipe
 * counts in @rdev.  The GUI-idle waits between register writes are
 * required; skipping them can hang the engine (see fdo bug 24041).
 */
void r420_pipes_init(struct radeon_device *rdev)
{
	unsigned tmp;
	unsigned gb_pipe_select;
	unsigned num_pipes;

	/* GA_ENHANCE workaround TCL deadlock issue */
	WREG32(0x4274, (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3));
	/* add idle wait as per freedesktop.org bug 24041 */
	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}
	/* get max number of pipes: 0x402C is presumably GB_PIPE_SELECT
	 * (same register the debugfs dump reads symbolically) — bits
	 * 13:12 encode pipe count minus one.  TODO confirm against
	 * register headers. */
	gb_pipe_select = RREG32(0x402C);
	num_pipes = ((gb_pipe_select >> 12) & 3) + 1;
	rdev->num_gb_pipes = num_pipes;
	tmp = 0;
	/* Translate pipe count into the hardware's pipe-enable field. */
	switch (num_pipes) {
	default:
		/* force to 1 pipe */
		num_pipes = 1;
		/* fall through */
	case 1:
		tmp = (0 << 1);
		break;
	case 2:
		tmp = (3 << 1);
		break;
	case 3:
		tmp = (6 << 1);
		break;
	case 4:
		tmp = (7 << 1);
		break;
	}
	/* One enable bit per pipe. */
	WREG32(0x42C8, (1 << num_pipes) - 1);
	/* Sub pixel 1/12 so we can have 4K rendering according to doc */
	tmp |= (1 << 4) | (1 << 0);
	WREG32(0x4018, tmp);
	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}

	/* Set bit 31 of register 0x170C (purpose not evident from this
	 * file — TODO confirm against register documentation). */
	tmp = RREG32(0x170C);
	WREG32(0x170C, tmp | (1 << 31));

	WREG32(R300_RB2D_DSTCACHE_MODE,
	       RREG32(R300_RB2D_DSTCACHE_MODE) |
	       R300_DC_AUTOFLUSH_ENABLE |
	       R300_DC_DC_DISABLE_IGNORE_PE);

	if (r100_gui_wait_for_idle(rdev)) {
		printk(KERN_WARNING "Failed to wait GUI idle while "
		       "programming pipes. Bad things might happen.\n");
	}

	/* RV530 may have two z pipes; everything else handled here has one. */
	if (rdev->family == CHIP_RV530) {
		tmp = RREG32(RV530_GB_PIPE_SELECT2);
		if ((tmp & 3) == 3)
			rdev->num_z_pipes = 2;
		else
			rdev->num_z_pipes = 1;
	} else
		rdev->num_z_pipes = 1;

	DRM_INFO("radeon: %d quad pipes, %d z pipes initialized.\n",
		 rdev->num_gb_pipes, rdev->num_z_pipes);
}
9f022ddf | 129 | u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg) |
771fe6b9 | 130 | { |
9f022ddf JG |
131 | u32 r; |
132 | ||
133 | WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg)); | |
134 | r = RREG32(R_0001FC_MC_IND_DATA); | |
135 | return r; | |
136 | } | |
137 | ||
138 | void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v) | |
139 | { | |
140 | WREG32(R_0001F8_MC_IND_INDEX, S_0001F8_MC_IND_ADDR(reg) | | |
141 | S_0001F8_MC_IND_WR_EN(1)); | |
142 | WREG32(R_0001FC_MC_IND_DATA, v); | |
143 | } | |
144 | ||
/*
 * r420_debugfs() - register the r420 debugfs files.
 * @rdev: radeon device
 *
 * Registration failures are only logged; debugfs is diagnostic-only
 * and must never abort device initialization.
 */
static void r420_debugfs(struct radeon_device *rdev)
{
	if (r100_debugfs_rbbm_init(rdev))
		DRM_ERROR("Failed to register debugfs file for RBBM !\n");
	if (r420_debugfs_pipes_info_init(rdev))
		DRM_ERROR("Failed to register debugfs file for pipes !\n");
}
155 | static void r420_clock_resume(struct radeon_device *rdev) | |
156 | { | |
157 | u32 sclk_cntl; | |
158 | sclk_cntl = RREG32_PLL(R_00000D_SCLK_CNTL); | |
159 | sclk_cntl |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1); | |
160 | if (rdev->family == CHIP_R420) | |
161 | sclk_cntl |= S_00000D_FORCE_PX(1) | S_00000D_FORCE_TX(1); | |
162 | WREG32_PLL(R_00000D_SCLK_CNTL, sclk_cntl); | |
163 | } | |
164 | ||
fc30b8ef | 165 | static int r420_startup(struct radeon_device *rdev) |
9f022ddf JG |
166 | { |
167 | int r; | |
168 | ||
9f022ddf JG |
169 | r300_mc_program(rdev); |
170 | /* Initialize GART (initialize after TTM so we can allocate | |
171 | * memory through TTM but finalize after TTM) */ | |
4aac0473 JG |
172 | if (rdev->flags & RADEON_IS_PCIE) { |
173 | r = rv370_pcie_gart_enable(rdev); | |
174 | if (r) | |
175 | return r; | |
176 | } | |
177 | if (rdev->flags & RADEON_IS_PCI) { | |
178 | r = r100_pci_gart_enable(rdev); | |
179 | if (r) | |
180 | return r; | |
9f022ddf | 181 | } |
771fe6b9 | 182 | r420_pipes_init(rdev); |
9f022ddf JG |
183 | /* Enable IRQ */ |
184 | rdev->irq.sw_int = true; | |
185 | r100_irq_set(rdev); | |
186 | /* 1M ring buffer */ | |
187 | r = r100_cp_init(rdev, 1024 * 1024); | |
188 | if (r) { | |
189 | dev_err(rdev->dev, "failled initializing CP (%d).\n", r); | |
190 | return r; | |
191 | } | |
192 | r = r100_wb_init(rdev); | |
193 | if (r) { | |
194 | dev_err(rdev->dev, "failled initializing WB (%d).\n", r); | |
771fe6b9 | 195 | } |
9f022ddf JG |
196 | r = r100_ib_init(rdev); |
197 | if (r) { | |
198 | dev_err(rdev->dev, "failled initializing IB (%d).\n", r); | |
199 | return r; | |
200 | } | |
201 | return 0; | |
771fe6b9 JG |
202 | } |
203 | ||
/*
 * r420_resume() - power-management resume entry point.
 * @rdev: radeon device
 *
 * Disables the GART, restores clocks, resets the GPU, re-posts the
 * card via ATOM or combios, then re-runs the normal startup path.
 * Returns 0 on success or a negative error code from r420_startup().
 */
int r420_resume(struct radeon_device *rdev)
{
	/* Make sure the GART is not active while we reset the GPU */
	if (rdev->flags & RADEON_IS_PCIE)
		rv370_pcie_gart_disable(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_disable(rdev);
	/* Resume clock before doing reset */
	r420_clock_resume(rdev);
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_gpu_reset(rdev)) {
		dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			RREG32(R_000E40_RBBM_STATUS),
			RREG32(R_0007C0_CP_STAT));
	}
	/* Re-post the card via the appropriate BIOS type */
	if (rdev->is_atom_bios) {
		atom_asic_init(rdev->mode_info.atom_context);
	} else {
		radeon_combios_asic_init(rdev->ddev);
	}
	/* Resume clock after posting */
	r420_clock_resume(rdev);

	return r420_startup(rdev);
}
/*
 * r420_suspend() - quiesce the GPU for suspend.
 * @rdev: radeon device
 *
 * Stops the engines in dependency order: CP first, then writeback and
 * interrupts, and finally the GART for the active bus type.
 * Always returns 0.
 */
int r420_suspend(struct radeon_device *rdev)
{
	r100_cp_disable(rdev);
	r100_wb_disable(rdev);
	r100_irq_disable(rdev);
	if (rdev->flags & RADEON_IS_PCIE)
		rv370_pcie_gart_disable(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_disable(rdev);
	return 0;
}
/*
 * r420_fini() - tear down everything r420_init() created.
 * @rdev: radeon device
 *
 * Finalizes engines, GEM, the GART, AGP, IRQs, fences and the memory
 * manager, then frees the BIOS image.  Teardown order mirrors the
 * reverse of initialization and should not be reordered.
 */
void r420_fini(struct radeon_device *rdev)
{
	r100_cp_fini(rdev);
	r100_wb_fini(rdev);
	r100_ib_fini(rdev);
	radeon_gem_fini(rdev);
	if (rdev->flags & RADEON_IS_PCIE)
		rv370_pcie_gart_fini(rdev);
	if (rdev->flags & RADEON_IS_PCI)
		r100_pci_gart_fini(rdev);
	radeon_agp_fini(rdev);
	radeon_irq_kms_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_object_fini(rdev);
	/* BIOS teardown depends on which BIOS flavor was initialized */
	if (rdev->is_atom_bios) {
		radeon_atombios_fini(rdev);
	} else {
		radeon_combios_fini(rdev);
	}
	kfree(rdev->bios);
	rdev->bios = NULL;
}
/*
 * r420_init() - one-time device initialization.
 * @rdev: radeon device
 *
 * Performs the full bring-up sequence: scratch/surface registers,
 * BIOS discovery (ATOM or combios), GPU reset, posting if needed,
 * clocks, VRAM probing, memory controller, fences, IRQs, the memory
 * manager and the GART, then starts acceleration via r420_startup().
 * If acceleration fails to start, the accel engines are torn down
 * again but 0 is still returned so the device stays usable for
 * modesetting without acceleration.
 *
 * Returns 0 on success or a negative error code for fatal failures.
 */
int r420_init(struct radeon_device *rdev)
{
	int r;

	rdev->new_init_path = true;
	/* Initialize scratch registers */
	radeon_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* TODO: disable VGA need to use VGA request */
	/* BIOS: AVIVO chips cannot run without one */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	if (rdev->is_atom_bios) {
		r = radeon_atombios_init(rdev);
		if (r) {
			return r;
		}
	} else {
		r = radeon_combios_init(rdev);
		if (r) {
			return r;
		}
	}
	/* Reset gpu before posting otherwise ATOM will enter infinite loop */
	if (radeon_gpu_reset(rdev)) {
		dev_warn(rdev->dev,
			"GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
			RREG32(R_000E40_RBBM_STATUS),
			RREG32(R_0007C0_CP_STAT));
	}
	/* Post the card if the BIOS has not already done it */
	if (!radeon_card_posted(rdev) && rdev->bios) {
		DRM_INFO("GPU not posted. posting now...\n");
		if (rdev->is_atom_bios) {
			atom_asic_init(rdev->mode_info.atom_context);
		} else {
			radeon_combios_asic_init(rdev->ddev);
		}
	}
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* Get VRAM information */
	r300_vram_info(rdev);
	/* Initialize memory controller (also test AGP) */
	r = r420_mc_init(rdev);
	if (r) {
		return r;
	}
	r420_debugfs(rdev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r) {
		return r;
	}
	r = radeon_irq_kms_init(rdev);
	if (r) {
		return r;
	}
	/* Memory manager */
	r = radeon_object_init(rdev);
	if (r) {
		return r;
	}
	if (rdev->flags & RADEON_IS_PCIE) {
		r = rv370_pcie_gart_init(rdev);
		if (r)
			return r;
	}
	if (rdev->flags & RADEON_IS_PCI) {
		r = r100_pci_gart_init(rdev);
		if (r)
			return r;
	}
	r300_set_reg_safe(rdev);
	rdev->accel_working = true;
	r = r420_startup(rdev);
	if (r) {
		/* Something went wrong with accel init; disable accel
		 * but keep the device alive for unaccelerated use. */
		dev_err(rdev->dev, "Disabling GPU acceleration\n");
		r420_suspend(rdev);
		r100_cp_fini(rdev);
		r100_wb_fini(rdev);
		r100_ib_fini(rdev);
		if (rdev->flags & RADEON_IS_PCIE)
			rv370_pcie_gart_fini(rdev);
		if (rdev->flags & RADEON_IS_PCI)
			r100_pci_gart_fini(rdev);
		radeon_agp_fini(rdev);
		radeon_irq_kms_fini(rdev);
		rdev->accel_working = false;
	}
	return 0;
}
771fe6b9 JG |
362 | |
/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)
/* Dump the pipe configuration registers into the debugfs seq_file. */
static int r420_debugfs_pipes_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t tmp;

	tmp = RREG32(R400_GB_PIPE_SELECT);
	seq_printf(m, "GB_PIPE_SELECT 0x%08x\n", tmp);
	tmp = RREG32(R300_GB_TILE_CONFIG);
	seq_printf(m, "GB_TILE_CONFIG 0x%08x\n", tmp);
	tmp = RREG32(R300_DST_PIPE_CONFIG);
	seq_printf(m, "DST_PIPE_CONFIG 0x%08x\n", tmp);
	return 0;
}

/* Entry table registered by r420_debugfs_pipes_info_init(). */
static struct drm_info_list r420_pipes_info_list[] = {
	{"r420_pipes_info", r420_debugfs_pipes_info, 0, NULL},
};
#endif
387 | ||
/*
 * r420_debugfs_pipes_info_init() - register the pipes debugfs file.
 * @rdev: radeon device
 *
 * Returns 0 on success (or when debugfs support is compiled out),
 * otherwise the error from radeon_debugfs_add_files().
 */
int r420_debugfs_pipes_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r420_pipes_info_list, 1);
#else
	return 0;
#endif
}