drm/radeon/kms: DCE6 disp eng pll updates
[deliverable/linux.git] / drivers / gpu / drm / radeon / radeon_device.c
CommitLineData
771fe6b9
JG
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include <linux/console.h>
5a0e3ad6 29#include <linux/slab.h>
771fe6b9
JG
30#include <drm/drmP.h>
31#include <drm/drm_crtc_helper.h>
32#include <drm/radeon_drm.h>
28d52043 33#include <linux/vgaarb.h>
6a9ee8af 34#include <linux/vga_switcheroo.h>
bcc65fd8 35#include <linux/efi.h>
771fe6b9
JG
36#include "radeon_reg.h"
37#include "radeon.h"
771fe6b9
JG
38#include "atom.h"
39
1b5331d9
JG
/*
 * Marketing name for each ASIC generation, indexed by the
 * RADEON_FAMILY_MASK bits of the device flags (see the
 * radeon_family_name[rdev->family] lookup in radeon_device_init);
 * the order here must match the radeon_family enum exactly.
 */
static const char radeon_family_name[][16] = {
	"R100",    "RV100",   "RS100",    "RV200", "RS200",
	"R200",    "RV250",   "RS300",    "RV280", "R300",
	"R350",    "RV350",   "RV380",    "R420",  "R423",
	"RV410",   "RS400",   "RS480",    "RS600", "RS690",
	"RS740",   "RV515",   "R520",     "RV530", "RV560",
	"RV570",   "R580",    "R600",     "RV610", "RV630",
	"RV670",   "RV620",   "RV635",    "RS780", "RS880",
	"RV770",   "RV730",   "RV710",    "RV740", "CEDAR",
	"REDWOOD", "JUNIPER", "CYPRESS",  "HEMLOCK", "PALM",
	"SUMO",    "SUMO2",   "BARTS",    "TURKS", "CAICOS",
	"CAYMAN",  "TAHITI",  "PITCAIRN", "VERDE", "LAST",
};
97
b1e3a6d1
MD
98/*
99 * Clear GPU surface registers.
100 */
3ce0a23d 101void radeon_surface_init(struct radeon_device *rdev)
b1e3a6d1
MD
102{
103 /* FIXME: check this out */
104 if (rdev->family < CHIP_R600) {
105 int i;
106
550e2d92
DA
107 for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
108 if (rdev->surface_regs[i].bo)
109 radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
110 else
111 radeon_clear_surface_reg(rdev, i);
b1e3a6d1 112 }
e024e110
DA
113 /* enable surfaces */
114 WREG32(RADEON_SURFACE_CNTL, 0);
b1e3a6d1
MD
115 }
116}
117
771fe6b9
JG
118/*
119 * GPU scratch registers helpers function.
120 */
3ce0a23d 121void radeon_scratch_init(struct radeon_device *rdev)
771fe6b9
JG
122{
123 int i;
124
125 /* FIXME: check this out */
126 if (rdev->family < CHIP_R300) {
127 rdev->scratch.num_reg = 5;
128 } else {
129 rdev->scratch.num_reg = 7;
130 }
724c80e1 131 rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
771fe6b9
JG
132 for (i = 0; i < rdev->scratch.num_reg; i++) {
133 rdev->scratch.free[i] = true;
724c80e1 134 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
771fe6b9
JG
135 }
136}
137
138int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
139{
140 int i;
141
142 for (i = 0; i < rdev->scratch.num_reg; i++) {
143 if (rdev->scratch.free[i]) {
144 rdev->scratch.free[i] = false;
145 *reg = rdev->scratch.reg[i];
146 return 0;
147 }
148 }
149 return -EINVAL;
150}
151
152void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
153{
154 int i;
155
156 for (i = 0; i < rdev->scratch.num_reg; i++) {
157 if (rdev->scratch.reg[i] == reg) {
158 rdev->scratch.free[i] = true;
159 return;
160 }
161 }
162}
163
724c80e1
AD
164void radeon_wb_disable(struct radeon_device *rdev)
165{
166 int r;
167
168 if (rdev->wb.wb_obj) {
169 r = radeon_bo_reserve(rdev->wb.wb_obj, false);
170 if (unlikely(r != 0))
171 return;
172 radeon_bo_kunmap(rdev->wb.wb_obj);
173 radeon_bo_unpin(rdev->wb.wb_obj);
174 radeon_bo_unreserve(rdev->wb.wb_obj);
175 }
176 rdev->wb.enabled = false;
177}
178
179void radeon_wb_fini(struct radeon_device *rdev)
180{
181 radeon_wb_disable(rdev);
182 if (rdev->wb.wb_obj) {
183 radeon_bo_unref(&rdev->wb.wb_obj);
184 rdev->wb.wb = NULL;
185 rdev->wb.wb_obj = NULL;
186 }
187}
188
/*
 * Allocate, pin and map the GPU writeback page, then decide whether
 * writeback (and event-write fences) should actually be used for this
 * ASIC/configuration. Returns 0 on success or a negative error code;
 * on any failure the partially set up state is torn down via
 * radeon_wb_fini().
 */
int radeon_wb_init(struct radeon_device *rdev)
{
	int r;

	/* allocate the bo only once; re-init after resume reuses it */
	if (rdev->wb.wb_obj == NULL) {
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}
	}
	r = radeon_bo_reserve(rdev->wb.wb_obj, false);
	if (unlikely(r != 0)) {
		radeon_wb_fini(rdev);
		return r;
	}
	/* pin in GTT so the GPU can write to it at a stable address */
	r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
			  &rdev->wb.gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->wb.wb_obj);
		dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
		radeon_wb_fini(rdev);
		return r;
	}
	r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
	radeon_bo_unreserve(rdev->wb.wb_obj);
	if (r) {
		dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
		radeon_wb_fini(rdev);
		return r;
	}

	/* clear wb memory */
	memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
	/* disable event_write fences */
	rdev->wb.use_event = false;
	/* disabled via module param */
	if (radeon_no_wb == 1)
		rdev->wb.enabled = false;
	else {
		if (rdev->flags & RADEON_IS_AGP) {
			/* often unreliable on AGP */
			rdev->wb.enabled = false;
		} else if (rdev->family < CHIP_R300) {
			/* often unreliable on pre-r300 */
			rdev->wb.enabled = false;
		} else {
			rdev->wb.enabled = true;
			/* event_write fences are only available on r600+ */
			if (rdev->family >= CHIP_R600)
				rdev->wb.use_event = true;
		}
	}
	/* always use writeback/events on NI */
	if (ASIC_IS_DCE5(rdev)) {
		rdev->wb.enabled = true;
		rdev->wb.use_event = true;
	}

	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");

	return 0;
}
253
d594e46a
JG
/**
 * radeon_vram_location - try to find VRAM location
 * @rdev: radeon device structure holding all necessary informations
 * @mc: memory controller structure holding memory informations
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as parameter (which is so far either PCI aperture address or
 * for IGP TOM base address).
 *
 * If there is not enough space to fit the invisible VRAM in the 32bits
 * address space then we limit the VRAM size to the aperture.
 *
 * If we are using AGP and if the AGP aperture doesn't allow us to have
 * room for all the VRAM then we restrict the VRAM to the PCI aperture
 * size and print a warning.
 *
 * This function will never fail, worst case is limiting VRAM.
 *
 * Note: GTT start, end, size should be initialized before calling this
 * function on AGP platform.
 *
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size as on some board we need to program the mc to
 * cover the whole aperture even if VRAM size is inferior to aperture size
 * Novell bug 204882 + along with lots of ubuntu ones
 *
 * Note: when limiting vram it's safe to overwrite real_vram_size because
 * we are not in a case where real_vram_size is inferior to mc_vram_size (ie
 * not affected by bogus hw of Novell bug 204882 + along with lots of ubuntu
 * ones)
 *
 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
	mc->vram_start = base;
	/* must fit entirely below the 4GB boundary */
	if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	/* on AGP, VRAM must not overlap the (already placed) GTT range */
	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	/* recompute: mc_vram_size may have been clamped above */
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (radeon_vram_limit && radeon_vram_limit < mc->real_vram_size)
		mc->real_vram_size = radeon_vram_limit;
	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
			mc->mc_vram_size >> 20, mc->vram_start,
			mc->vram_end, mc->real_vram_size >> 20);
}
771fe6b9 316
d594e46a
JG
/**
 * radeon_gtt_location - try to find GTT location
 * @rdev: radeon device structure holding all necessary informations
 * @mc: memory controller structure holding memory informations
 *
 * Function will try to place GTT before or after VRAM, in whichever
 * gap of the 32-bit address space is larger.
 *
 * If GTT size is bigger than space left then we adjust GTT size.
 * Thus this function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_af, size_bf;

	/* space after VRAM / before VRAM, each aligned to gtt_base_align */
	size_af = ((0xFFFFFFFF - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		if (mc->gtt_size > size_bf) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
	} else {
		if (mc->gtt_size > size_af) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}
352
771fe6b9
JG
/*
 * GPU helpers function.
 */
/*
 * Check whether the GPU has already been posted (initialized) by the
 * VBIOS/firmware, by probing CRTC enable bits and the MC memory size
 * register. Returns true if the card looks posted.
 */
bool radeon_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* NOTE(review): on EFI-booted Apple hardware we always report
	 * "not posted" so the driver posts the card itself — presumably
	 * because the EFI driver doesn't fully post it; confirm. */
	if (efi_enabled && rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE)
		return false;

	/* first check CRTCs */
	if (ASIC_IS_DCE41(rdev)) {
		/* DCE4.1 (Fusion) exposes two CRTCs */
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_DCE4(rdev)) {
		/* DCE4 (Evergreen) exposes six CRTCs */
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_AVIVO(rdev)) {
		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
			RREG32(AVIVO_D2CRTC_CONTROL);
		if (reg & AVIVO_CRTC_EN) {
			return true;
		}
	} else {
		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
			RREG32(RADEON_CRTC2_GEN_CNTL);
		if (reg & RADEON_CRTC_EN) {
			return true;
		}
	}

	/* then check MEM_SIZE, in case the crtcs are off */
	if (rdev->family >= CHIP_R600)
		reg = RREG32(R600_CONFIG_MEMSIZE);
	else
		reg = RREG32(RADEON_CONFIG_MEMSIZE);

	if (reg)
		return true;

	return false;

}
404
f47299c5
AD
/*
 * Refresh the cached fixed-point sclk/mclk values (in MHz, converted
 * from the 10 kHz units of current_sclk/current_mclk) and, on IGPs,
 * the derived core bandwidth figure.
 */
void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
	fixed20_12 a;
	u32 sclk = rdev->pm.current_sclk;
	u32 mclk = rdev->pm.current_mclk;

	/* sclk/mclk in Mhz */
	a.full = dfixed_const(100);
	rdev->pm.sclk.full = dfixed_const(sclk);
	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
	rdev->pm.mclk.full = dfixed_const(mclk);
	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);

	if (rdev->flags & RADEON_IS_IGP) {
		a.full = dfixed_const(16);
		/* core_bandwidth = sclk(Mhz) * 16 */
		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
	}
}
424
72542d77
DA
425bool radeon_boot_test_post_card(struct radeon_device *rdev)
426{
427 if (radeon_card_posted(rdev))
428 return true;
429
430 if (rdev->bios) {
431 DRM_INFO("GPU not posted. posting now...\n");
432 if (rdev->is_atom_bios)
433 atom_asic_init(rdev->mode_info.atom_context);
434 else
435 radeon_combios_asic_init(rdev->ddev);
436 return true;
437 } else {
438 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
439 return false;
440 }
441}
442
3ce0a23d
JG
/*
 * Allocate and DMA-map the zeroed "dummy" page (idempotent — returns
 * 0 immediately if it already exists). Returns -ENOMEM on allocation
 * or mapping failure.
 */
int radeon_dummy_page_init(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page)
		return 0;
	/* GFP_DMA32: page must be addressable by devices limited to 32 bits */
	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (rdev->dummy_page.page == NULL)
		return -ENOMEM;
	rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
					0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
		dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(rdev->dummy_page.page);
		rdev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	return 0;
}
460
461void radeon_dummy_page_fini(struct radeon_device *rdev)
462{
463 if (rdev->dummy_page.page == NULL)
464 return;
465 pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
466 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
467 __free_page(rdev->dummy_page.page);
468 rdev->dummy_page.page = NULL;
469}
470
771fe6b9 471
771fe6b9
JG
/* ATOM accessor methods */
/*
 * Thin trampolines handed to the ATOM BIOS interpreter (via struct
 * card_info in radeon_atombios_init) so its scripts can touch PLL, MC,
 * MMIO and PCI I/O registers through the driver's accessors.
 */

/* read a PLL register */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = rdev->pll_rreg(rdev, reg);
	return r;
}

/* write a PLL register */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->pll_wreg(rdev, reg, val);
}

/* read a memory-controller register */
static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = rdev->mc_rreg(rdev, reg);
	return r;
}

/* write a memory-controller register */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->mc_wreg(rdev, reg, val);
}

/* write an MMIO register; ATOM passes dword indices, hence reg*4 */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32(reg*4, val);
}

/* read an MMIO register (dword index) */
static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = RREG32(reg*4);
	return r;
}

/* write a PCI I/O-space register (dword index) */
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32_IO(reg*4, val);
}

/* read a PCI I/O-space register (dword index) */
static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = RREG32_IO(reg*4);
	return r;
}
536
771fe6b9
JG
537int radeon_atombios_init(struct radeon_device *rdev)
538{
61c4b24b
MF
539 struct card_info *atom_card_info =
540 kzalloc(sizeof(struct card_info), GFP_KERNEL);
541
542 if (!atom_card_info)
543 return -ENOMEM;
544
545 rdev->mode_info.atom_card_info = atom_card_info;
546 atom_card_info->dev = rdev->ddev;
547 atom_card_info->reg_read = cail_reg_read;
548 atom_card_info->reg_write = cail_reg_write;
351a52a2
AD
549 /* needed for iio ops */
550 if (rdev->rio_mem) {
551 atom_card_info->ioreg_read = cail_ioreg_read;
552 atom_card_info->ioreg_write = cail_ioreg_write;
553 } else {
554 DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
555 atom_card_info->ioreg_read = cail_reg_read;
556 atom_card_info->ioreg_write = cail_reg_write;
557 }
61c4b24b
MF
558 atom_card_info->mc_read = cail_mc_read;
559 atom_card_info->mc_write = cail_mc_write;
560 atom_card_info->pll_read = cail_pll_read;
561 atom_card_info->pll_write = cail_pll_write;
562
563 rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
c31ad97f 564 mutex_init(&rdev->mode_info.atom_context->mutex);
771fe6b9 565 radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
d904ef9b 566 atom_allocate_fb_scratch(rdev->mode_info.atom_context);
771fe6b9
JG
567 return 0;
568}
569
570void radeon_atombios_fini(struct radeon_device *rdev)
571{
4a04a844
JG
572 if (rdev->mode_info.atom_context) {
573 kfree(rdev->mode_info.atom_context->scratch);
574 kfree(rdev->mode_info.atom_context);
575 }
61c4b24b 576 kfree(rdev->mode_info.atom_card_info);
771fe6b9
JG
577}
578
/* combios counterpart of radeon_atombios_init(): only the scratch
 * registers need setting up; always succeeds. */
int radeon_combios_init(struct radeon_device *rdev)
{
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
	return 0;
}

/* nothing to tear down for combios */
void radeon_combios_fini(struct radeon_device *rdev)
{
}
588
28d52043
DA
/* if we get transitioned to only one device, take VGA back */
/* vgaarb callback: toggle legacy VGA decoding on this GPU and report
 * which resources we still decode afterwards */
static unsigned int radeon_vga_set_decode(void *cookie, bool state)
{
	struct radeon_device *rdev = cookie;
	radeon_vga_set_state(rdev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
c1176d6f 600
36421338
JG
/*
 * Validate and normalize the module parameters, clamping anything
 * invalid to a safe default with a warning. Note the switches are
 * deliberate whitelists, not generic power-of-two checks: only the
 * listed sizes are supported.
 */
void radeon_check_arguments(struct radeon_device *rdev)
{
	/* vramlimit must be a power of two */
	switch (radeon_vram_limit) {
	case 0:
	case 4:
	case 8:
	case 16:
	case 32:
	case 64:
	case 128:
	case 256:
	case 512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
				radeon_vram_limit);
		radeon_vram_limit = 0;
		break;
	}
	/* parameter is in MB; convert to bytes */
	radeon_vram_limit = radeon_vram_limit << 20;
	/* gtt size must be power of two and greater or equal to 32M */
	switch (radeon_gart_size) {
	case 4:
	case 8:
	case 16:
		dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
				radeon_gart_size);
		radeon_gart_size = 512;
		break;
	case 32:
	case 64:
	case 128:
	case 256:
	case 512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
				radeon_gart_size);
		radeon_gart_size = 512;
		break;
	}
	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
	/* AGP mode can only be -1, 1, 2, 4, 8 */
	switch (radeon_agpmode) {
	case -1:
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
				"-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
		radeon_agpmode = 0;
		break;
	}
}
666
6a9ee8af
DA
/*
 * vga_switcheroo callback: power the GPU up or down when the mux
 * switches to/away from us. switch_power_state is set to CHANGING
 * around the suspend/resume so those paths know not to short-circuit.
 */
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
	if (state == VGA_SWITCHEROO_ON) {
		printk(KERN_INFO "radeon: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		radeon_resume_kms(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		printk(KERN_INFO "radeon: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		radeon_suspend_kms(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}
686
687static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
688{
689 struct drm_device *dev = pci_get_drvdata(pdev);
690 bool can_switch;
691
692 spin_lock(&dev->count_lock);
693 can_switch = (dev->open_count == 0);
694 spin_unlock(&dev->count_lock);
695 return can_switch;
696}
697
698
771fe6b9
JG
/*
 * One-time bring-up of a radeon device: initialize software state and
 * locks, pick the ASIC function table, configure DMA masks, map the
 * MMIO/IO BARs, register with vgaarb/vga_switcheroo, and finally run
 * the ASIC-specific init (with an AGP->PCI(E) GART fallback if
 * acceleration fails on AGP). Returns 0 on success or a negative
 * error code.
 */
int radeon_device_init(struct radeon_device *rdev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r, i;
	int dma_bits;

	rdev->shutdown = false;
	rdev->dev = &pdev->dev;
	rdev->ddev = ddev;
	rdev->pdev = pdev;
	rdev->flags = flags;
	rdev->family = flags & RADEON_FAMILY_MASK;
	rdev->is_atom_bios = false;
	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
	rdev->gpu_lockup = false;
	rdev->accel_working = false;

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
		radeon_family_name[rdev->family], pdev->vendor, pdev->device,
		pdev->subsystem_vendor, pdev->subsystem_device);

	/* mutex initialization are all done here so we
	 * can recall function without having locking issues */
	radeon_mutex_init(&rdev->cs_mutex);
	radeon_mutex_init(&rdev->ib_pool.mutex);
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		mutex_init(&rdev->ring[i].mutex);
	mutex_init(&rdev->dc_hw_i2c_mutex);
	/* the IH ring (and its lock) only exists on r600+ */
	if (rdev->family >= CHIP_R600)
		spin_lock_init(&rdev->ih.lock);
	mutex_init(&rdev->gem.mutex);
	mutex_init(&rdev->pm.mutex);
	mutex_init(&rdev->vram_mutex);
	rwlock_init(&rdev->fence_lock);
	rwlock_init(&rdev->semaphore_drv.lock);
	INIT_LIST_HEAD(&rdev->gem.objects);
	init_waitqueue_head(&rdev->irq.vblank_queue);
	init_waitqueue_head(&rdev->irq.idle_queue);
	INIT_LIST_HEAD(&rdev->semaphore_drv.bo);
	/* initialize vm here */
	rdev->vm_manager.use_bitmap = 1;
	rdev->vm_manager.max_pfn = 1 << 20;
	INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);

	/* Set asic functions */
	r = radeon_asic_init(rdev);
	if (r)
		return r;
	/* sanitize module parameters (needs the asic family set above) */
	radeon_check_arguments(rdev);

	/* all of the newer IGP chips have an internal gart
	 * However some rs4xx report as AGP, so remove that here.
	 */
	if ((rdev->family >= CHIP_RS400) &&
	    (rdev->flags & RADEON_IS_IGP)) {
		rdev->flags &= ~RADEON_IS_AGP;
	}

	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
		radeon_agp_disable(rdev);
	}

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits
	 * AGP - generally dma32 is safest
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
	 */
	rdev->need_dma32 = false;
	if (rdev->flags & RADEON_IS_AGP)
		rdev->need_dma32 = true;
	if ((rdev->flags & RADEON_IS_PCI) &&
	    (rdev->family < CHIP_RS400))
		rdev->need_dma32 = true;

	dma_bits = rdev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		/* fall back to 32 bits if 40-bit addressing isn't available */
		rdev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "radeon: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "radeon: No coherent DMA available.\n");
	}

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
	rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
	rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
	if (rdev->rmmio == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
	DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);

	/* io port mapping: pick the first I/O BAR */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
			break;
		}
	}
	if (rdev->rio_mem == NULL)
		DRM_ERROR("Unable to find PCI I/O BAR\n");

	/* if we have > 1 VGA cards, then disable the radeon VGA resources */
	/* this will fail for cards that aren't VGA class devices, just
	 * ignore it */
	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
	vga_switcheroo_register_client(rdev->pdev,
				       radeon_switcheroo_set_state,
				       NULL,
				       radeon_switcheroo_can_switch);

	r = radeon_init(rdev);
	if (r)
		return r;

	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
		/* Acceleration not working on AGP card try again
		 * with fallback to PCI or PCIE GART
		 */
		radeon_asic_reset(rdev);
		radeon_fini(rdev);
		radeon_agp_disable(rdev);
		r = radeon_init(rdev);
		if (r)
			return r;
	}
	/* optional self-tests / benchmarks, gated by module parameters */
	if ((radeon_testing & 1)) {
		radeon_test_moves(rdev);
	}
	if ((radeon_testing & 2)) {
		radeon_test_syncing(rdev);
	}
	if (radeon_benchmarking) {
		radeon_benchmark(rdev, radeon_benchmarking);
	}
	return 0;
}
847
4d8bf9ae
CK
848static void radeon_debugfs_remove_files(struct radeon_device *rdev);
849
771fe6b9
JG
/*
 * Tear down a radeon device: evict VRAM, run the ASIC-specific fini,
 * unregister from vga_switcheroo/vgaarb, and unmap the IO/MMIO BARs.
 * Mirrors radeon_device_init() in reverse order.
 */
void radeon_device_fini(struct radeon_device *rdev)
{
	DRM_INFO("radeon: finishing device.\n");
	/* tells in-flight work (e.g. IRQ/fence paths) we are going away */
	rdev->shutdown = true;
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);
	radeon_fini(rdev);
	vga_switcheroo_unregister_client(rdev->pdev);
	vga_client_register(rdev->pdev, NULL, NULL, NULL);
	if (rdev->rio_mem)
		pci_iounmap(rdev->pdev, rdev->rio_mem);
	rdev->rio_mem = NULL;
	iounmap(rdev->rmmio);
	rdev->rmmio = NULL;
	radeon_debugfs_remove_files(rdev);
}
866
867
/*
 * Suspend & resume.
 */
/*
 * Suspend the device: turn displays off, unpin user framebuffers,
 * evict VRAM, wait for the GPU to idle on every ring, then power the
 * PCI device down (for a real suspend, not a freeze). Returns 0 on
 * success or -ENODEV when called without a device.
 */
int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
{
	struct radeon_device *rdev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	int i, r;

	if (dev == NULL || dev->dev_private == NULL) {
		return -ENODEV;
	}
	/* PRETHAW: nothing to do, we are about to thaw anyway */
	if (state.event == PM_EVENT_PRETHAW) {
		return 0;
	}
	rdev = dev->dev_private;

	/* already powered off by vga_switcheroo */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	drm_kms_helper_poll_disable(dev);

	/* turn off display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
	}

	/* unpin the front buffers */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
		struct radeon_bo *robj;

		if (rfb == NULL || rfb->obj == NULL) {
			continue;
		}
		robj = gem_to_radeon_bo(rfb->obj);
		/* don't unpin kernel fb objects */
		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				radeon_bo_unpin(robj);
				radeon_bo_unreserve(robj);
			}
		}
	}
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);
	/* wait for gpu to finish processing current batch */
	for (i = 0; i < RADEON_NUM_RINGS; i++)
		radeon_fence_wait_last(rdev, i);

	radeon_save_bios_scratch_regs(rdev);

	radeon_pm_suspend(rdev);
	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);
	/* evict remaining vram memory */
	radeon_bo_evict_vram(rdev);

	radeon_agp_suspend(rdev);

	pci_save_state(dev->pdev);
	if (state.event == PM_EVENT_SUSPEND) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}
	/* console_lock serializes against fbcon while we flip fbdev state */
	console_lock();
	radeon_fbdev_set_suspend(rdev, 1);
	console_unlock();
	return 0;
}
941
/*
 * Resume the device: re-enable the PCI device, run the ASIC resume
 * path, re-init the display hardware (DIG PHYs, disp engine PLL, hpd)
 * and restore the previous mode. Returns 0 on success, -1 if the PCI
 * device could not be re-enabled.
 */
int radeon_resume_kms(struct drm_device *dev)
{
	struct drm_connector *connector;
	struct radeon_device *rdev = dev->dev_private;

	/* powered off by vga_switcheroo: it resumes us itself */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	console_lock();
	pci_set_power_state(dev->pdev, PCI_D0);
	pci_restore_state(dev->pdev);
	if (pci_enable_device(dev->pdev)) {
		console_unlock();
		return -1;
	}
	pci_set_master(dev->pdev);
	/* resume AGP if in use */
	radeon_agp_resume(rdev);
	radeon_resume(rdev);
	radeon_pm_resume(rdev);
	radeon_restore_bios_scratch_regs(rdev);

	radeon_fbdev_set_suspend(rdev, 0);
	console_unlock();

	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);
	}
	/* reset hpd state */
	radeon_hpd_init(rdev);
	/* blat the mode back in */
	drm_helper_resume_force_mode(dev);
	/* turn on display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
	}

	drm_kms_helper_poll_enable(dev);
	return 0;
}
984
90aca4d2
JG
/*
 * Attempt a full ASIC reset after a lockup: quiesce CS submission and
 * TTM, suspend the ASIC, reset it, and on success resume and restore
 * the mode. Returns 0 on success or the asic_reset error code.
 *
 * NOTE(review): on the failure path the TTM delayed workqueue locked
 * above is never unlocked (ttm_bo_unlock_delayed_workqueue is only
 * called in the success branch) — looks unbalanced; confirm intent.
 */
int radeon_gpu_reset(struct radeon_device *rdev)
{
	int r;
	int resched;

	/* Prevent CS ioctl from interfering */
	radeon_mutex_lock(&rdev->cs_mutex);

	radeon_save_bios_scratch_regs(rdev);
	/* block TTM */
	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
	radeon_suspend(rdev);

	r = radeon_asic_reset(rdev);
	if (!r) {
		dev_info(rdev->dev, "GPU reset succeed\n");
		radeon_resume(rdev);
		radeon_restore_bios_scratch_regs(rdev);
		drm_helper_resume_force_mode(rdev->ddev);
		ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
	}

	radeon_mutex_unlock(&rdev->cs_mutex);

	if (r) {
		/* bad news, how to tell it to userspace ? */
		dev_info(rdev->dev, "GPU reset failed\n");
	}

	return r;
}
1016
771fe6b9
JG
1017
1018/*
1019 * Debugfs
1020 */
771fe6b9
JG
/*
 * Register a set of debugfs files for this device (on both the control
 * and primary DRM minors). Idempotent for an already-registered @files
 * array; returns -EINVAL once RADEON_DEBUGFS_MAX_COMPONENTS slots are
 * exhausted.
 */
int radeon_debugfs_add_files(struct radeon_device *rdev,
			     struct drm_info_list *files,
			     unsigned nfiles)
{
	unsigned i;

	for (i = 0; i < rdev->debugfs_count; i++) {
		if (rdev->debugfs[i].files == files) {
			/* Already registered */
			return 0;
		}
	}

	/* count after appending this component */
	i = rdev->debugfs_count + 1;
	if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
		DRM_ERROR("Reached maximum number of debugfs components.\n");
		DRM_ERROR("Report so we increase "
			  "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
		return -EINVAL;
	}
	rdev->debugfs[rdev->debugfs_count].files = files;
	rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
	rdev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
	drm_debugfs_create_files(files, nfiles,
				 rdev->ddev->control->debugfs_root,
				 rdev->ddev->control);
	drm_debugfs_create_files(files, nfiles,
				 rdev->ddev->primary->debugfs_root,
				 rdev->ddev->primary);
#endif
	return 0;
}
1054
4d8bf9ae
CK
/*
 * Unregister every debugfs file set previously added with
 * radeon_debugfs_add_files() (no-op when debugfs is compiled out).
 */
static void radeon_debugfs_remove_files(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < rdev->debugfs_count; i++) {
		drm_debugfs_remove_files(rdev->debugfs[i].files,
					 rdev->debugfs[i].num_files,
					 rdev->ddev->control);
		drm_debugfs_remove_files(rdev->debugfs[i].files,
					 rdev->debugfs[i].num_files,
					 rdev->ddev->primary);
	}
#endif
}
1070
771fe6b9
JG
#if defined(CONFIG_DEBUG_FS)
/* DRM minor debugfs hooks: radeon registers its files lazily via
 * radeon_debugfs_add_files(), so these are intentionally empty. */
int radeon_debugfs_init(struct drm_minor *minor)
{
	return 0;
}

void radeon_debugfs_cleanup(struct drm_minor *minor)
{
}
#endif
This page took 0.229634 seconds and 5 git commands to generate.