vga_switcheroo: Introduce struct vga_switcheroo_client_ops
[deliverable/linux.git] drivers/gpu/drm/radeon/radeon_device.c

/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/console.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

static const char radeon_family_name[][16] = {
    "R100",
    "RV100",
    "RS100",
    "RV200",
    "RS200",
    "R200",
    "RV250",
    "RS300",
    "RV280",
    "R300",
    "R350",
    "RV350",
    "RV380",
    "R420",
    "R423",
    "RV410",
    "RS400",
    "RS480",
    "RS600",
    "RS690",
    "RS740",
    "RV515",
    "R520",
    "RV530",
    "RV560",
    "RV570",
    "R580",
    "R600",
    "RV610",
    "RV630",
    "RV670",
    "RV620",
    "RV635",
    "RS780",
    "RS880",
    "RV770",
    "RV730",
    "RV710",
    "RV740",
    "CEDAR",
    "REDWOOD",
    "JUNIPER",
    "CYPRESS",
    "HEMLOCK",
    "PALM",
    "SUMO",
    "SUMO2",
    "BARTS",
    "TURKS",
    "CAICOS",
    "CAYMAN",
    "ARUBA",
    "TAHITI",
    "PITCAIRN",
    "VERDE",
    "LAST",
};

/*
 * Clear GPU surface registers.
 */
void radeon_surface_init(struct radeon_device *rdev)
{
    /* FIXME: check this out */
    if (rdev->family < CHIP_R600) {
        int i;

        for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
            if (rdev->surface_regs[i].bo)
                radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
            else
                radeon_clear_surface_reg(rdev, i);
        }
        /* enable surfaces */
        WREG32(RADEON_SURFACE_CNTL, 0);
    }
}

/*
 * GPU scratch register helper functions.
 */
void radeon_scratch_init(struct radeon_device *rdev)
{
    int i;

    /* FIXME: check this out */
    if (rdev->family < CHIP_R300) {
        rdev->scratch.num_reg = 5;
    } else {
        rdev->scratch.num_reg = 7;
    }
    rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
    for (i = 0; i < rdev->scratch.num_reg; i++) {
        rdev->scratch.free[i] = true;
        rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
    }
}

int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
{
    int i;

    for (i = 0; i < rdev->scratch.num_reg; i++) {
        if (rdev->scratch.free[i]) {
            rdev->scratch.free[i] = false;
            *reg = rdev->scratch.reg[i];
            return 0;
        }
    }
    return -EINVAL;
}

void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
{
    int i;

    for (i = 0; i < rdev->scratch.num_reg; i++) {
        if (rdev->scratch.reg[i] == reg) {
            rdev->scratch.free[i] = true;
            return;
        }
    }
}

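/*
 * Example usage (an illustrative sketch, not code from this file): a
 * caller borrows a scratch register, lets the GPU write to it, then
 * returns it to the pool. The polling step is left as a placeholder.
 *
 *    uint32_t scratch;
 *
 *    if (radeon_scratch_get(rdev, &scratch) == 0) {
 *        WREG32(scratch, 0xCAFEDEAD);
 *        ... emit a packet that writes scratch, then poll RREG32(scratch) ...
 *        radeon_scratch_free(rdev, scratch);
 *    }
 */
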
void radeon_wb_disable(struct radeon_device *rdev)
{
    int r;

    if (rdev->wb.wb_obj) {
        r = radeon_bo_reserve(rdev->wb.wb_obj, false);
        if (unlikely(r != 0))
            return;
        radeon_bo_kunmap(rdev->wb.wb_obj);
        radeon_bo_unpin(rdev->wb.wb_obj);
        radeon_bo_unreserve(rdev->wb.wb_obj);
    }
    rdev->wb.enabled = false;
}

void radeon_wb_fini(struct radeon_device *rdev)
{
    radeon_wb_disable(rdev);
    if (rdev->wb.wb_obj) {
        radeon_bo_unref(&rdev->wb.wb_obj);
        rdev->wb.wb = NULL;
        rdev->wb.wb_obj = NULL;
    }
}

int radeon_wb_init(struct radeon_device *rdev)
{
    int r;

    if (rdev->wb.wb_obj == NULL) {
        r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
                             RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
        if (r) {
            dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
            return r;
        }
    }
    r = radeon_bo_reserve(rdev->wb.wb_obj, false);
    if (unlikely(r != 0)) {
        radeon_wb_fini(rdev);
        return r;
    }
    r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
                      &rdev->wb.gpu_addr);
    if (r) {
        radeon_bo_unreserve(rdev->wb.wb_obj);
        dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
        radeon_wb_fini(rdev);
        return r;
    }
    r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
    radeon_bo_unreserve(rdev->wb.wb_obj);
    if (r) {
        dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
        radeon_wb_fini(rdev);
        return r;
    }

    /* clear wb memory */
    memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
    /* disable event_write fences */
    rdev->wb.use_event = false;
    /* disabled via module param */
    if (radeon_no_wb == 1) {
        rdev->wb.enabled = false;
    } else {
        if (rdev->flags & RADEON_IS_AGP) {
            /* often unreliable on AGP */
            rdev->wb.enabled = false;
        } else if (rdev->family < CHIP_R300) {
            /* often unreliable on pre-r300 */
            rdev->wb.enabled = false;
        } else {
            rdev->wb.enabled = true;
            /* event_write fences are only available on r600+ */
            if (rdev->family >= CHIP_R600) {
                rdev->wb.use_event = true;
            }
        }
    }
    /* always use writeback/events on NI, APUs */
    if (rdev->family >= CHIP_PALM) {
        rdev->wb.enabled = true;
        rdev->wb.use_event = true;
    }

    dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");

    return 0;
}

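/*
 * Example (an illustrative sketch): with writeback enabled, ring code can
 * read GPU-updated values from the WB page instead of doing an MMIO
 * register read. The ring field names below follow the rest of the
 * driver but are an assumption here, not code from this file:
 *
 *    u32 rptr;
 *
 *    if (rdev->wb.enabled)
 *        rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
 *    else
 *        rptr = RREG32(ring->rptr_reg);
 */
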
/**
 * radeon_vram_location - try to find VRAM location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as a parameter (which is so far either the PCI aperture address or,
 * for IGP, the TOM base address).
 *
 * If there is not enough space to fit the invisible VRAM in the 32-bit
 * address space, then we limit the VRAM size to the aperture.
 *
 * If we are using AGP and the AGP aperture doesn't allow us to have
 * room for all the VRAM, then we restrict the VRAM to the PCI aperture
 * size and print a warning.
 *
 * This function never fails; in the worst case we limit VRAM.
 *
 * Note: GTT start, end, and size should be initialized before calling this
 * function on AGP platforms.
 *
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size;
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size as on some boards we need to program the mc to
 * cover the whole aperture even if VRAM size is smaller than aperture size
 * (Novell bug 204882, along with lots of Ubuntu ones).
 *
 * Note: when limiting VRAM it's safe to overwrite real_vram_size because
 * we are not in the case where real_vram_size is smaller than mc_vram_size
 * (i.e. not affected by the bogus hw of Novell bug 204882, along with lots
 * of Ubuntu ones).
 *
 * Note: IGP TOM addr should be the same as the aperture addr; we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size, align the new size on a power of 2.
 */
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
    mc->vram_start = base;
    if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) {
        dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
        mc->real_vram_size = mc->aper_size;
        mc->mc_vram_size = mc->aper_size;
    }
    mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
    if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
        dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
        mc->real_vram_size = mc->aper_size;
        mc->mc_vram_size = mc->aper_size;
    }
    mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
    if (radeon_vram_limit && radeon_vram_limit < mc->real_vram_size)
        mc->real_vram_size = radeon_vram_limit;
    dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
             mc->mc_vram_size >> 20, mc->vram_start,
             mc->vram_end, mc->real_vram_size >> 20);
}

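/*
 * Worked example (illustrative): with base = 0 and mc_vram_size = 256MB,
 * vram_start = 0x0 and vram_end = 0x0FFFFFFF. With base = 0xE0000000 and
 * mc_vram_size = 1GB, base + size would run past 0xFFFFFFFF, so both
 * mc_vram_size and real_vram_size are clamped to the aperture size and
 * only the visible part of VRAM is used.
 */
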
/**
 * radeon_gtt_location - try to find GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If GTT size is bigger than the space left, then we adjust the GTT size.
 * Thus this function never fails.
 *
 * FIXME: when reducing GTT size, align the new size on a power of 2.
 */
void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
    u64 size_af, size_bf;

    size_af = ((0xFFFFFFFF - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
    size_bf = mc->vram_start & ~mc->gtt_base_align;
    if (size_bf > size_af) {
        if (mc->gtt_size > size_bf) {
            dev_warn(rdev->dev, "limiting GTT\n");
            mc->gtt_size = size_bf;
        }
        mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
    } else {
        if (mc->gtt_size > size_af) {
            dev_warn(rdev->dev, "limiting GTT\n");
            mc->gtt_size = size_af;
        }
        mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
    }
    mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
    dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
             mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}

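/*
 * Worked example (illustrative): with vram_start = 0, vram_end =
 * 0x0FFFFFFF and gtt_base_align = 0, size_bf is 0 while size_af covers
 * the rest of the 32-bit space, so the GTT lands right after VRAM at
 * gtt_start = 0x10000000. A non-zero gtt_base_align (an alignment minus
 * one, used as a mask) rounds gtt_start up to the required boundary.
 */
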
/*
 * GPU helper functions.
 */
bool radeon_card_posted(struct radeon_device *rdev)
{
    uint32_t reg;

    if (efi_enabled && rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE)
        return false;

    /* first check CRTCs */
    if (ASIC_IS_DCE41(rdev)) {
        reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
            RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
        if (reg & EVERGREEN_CRTC_MASTER_EN)
            return true;
    } else if (ASIC_IS_DCE4(rdev)) {
        reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
            RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
            RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
            RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
            RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
            RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
        if (reg & EVERGREEN_CRTC_MASTER_EN)
            return true;
    } else if (ASIC_IS_AVIVO(rdev)) {
        reg = RREG32(AVIVO_D1CRTC_CONTROL) |
            RREG32(AVIVO_D2CRTC_CONTROL);
        if (reg & AVIVO_CRTC_EN) {
            return true;
        }
    } else {
        reg = RREG32(RADEON_CRTC_GEN_CNTL) |
            RREG32(RADEON_CRTC2_GEN_CNTL);
        if (reg & RADEON_CRTC_EN) {
            return true;
        }
    }

    /* then check MEM_SIZE, in case the crtcs are off */
    if (rdev->family >= CHIP_R600)
        reg = RREG32(R600_CONFIG_MEMSIZE);
    else
        reg = RREG32(RADEON_CONFIG_MEMSIZE);

    if (reg)
        return true;

    return false;
}

void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
    fixed20_12 a;
    u32 sclk = rdev->pm.current_sclk;
    u32 mclk = rdev->pm.current_mclk;

    /* sclk/mclk in Mhz */
    a.full = dfixed_const(100);
    rdev->pm.sclk.full = dfixed_const(sclk);
    rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
    rdev->pm.mclk.full = dfixed_const(mclk);
    rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);

    if (rdev->flags & RADEON_IS_IGP) {
        a.full = dfixed_const(16);
        /* core_bandwidth = sclk(Mhz) * 16 */
        rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
    }
}

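/*
 * Worked example (illustrative; assumes current_sclk is in 10 kHz units
 * as elsewhere in the driver): current_sclk = 40000 encodes 400 MHz, and
 * dividing dfixed_const(40000) by dfixed_const(100) yields 400.0 in
 * 20.12 fixed point, i.e. 400 << 12 = 0x190000.
 */
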
bool radeon_boot_test_post_card(struct radeon_device *rdev)
{
    if (radeon_card_posted(rdev))
        return true;

    if (rdev->bios) {
        DRM_INFO("GPU not posted. posting now...\n");
        if (rdev->is_atom_bios)
            atom_asic_init(rdev->mode_info.atom_context);
        else
            radeon_combios_asic_init(rdev->ddev);
        return true;
    } else {
        dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
        return false;
    }
}

int radeon_dummy_page_init(struct radeon_device *rdev)
{
    if (rdev->dummy_page.page)
        return 0;
    rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
    if (rdev->dummy_page.page == NULL)
        return -ENOMEM;
    rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
                                         0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
    if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
        dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
        __free_page(rdev->dummy_page.page);
        rdev->dummy_page.page = NULL;
        return -ENOMEM;
    }
    return 0;
}

void radeon_dummy_page_fini(struct radeon_device *rdev)
{
    if (rdev->dummy_page.page == NULL)
        return;
    pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
                   PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
    __free_page(rdev->dummy_page.page);
    rdev->dummy_page.page = NULL;
}

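/*
 * Note (an assumption based on the GART code elsewhere in the driver,
 * not on this file): unbound GART entries are pointed at the dummy page
 * so stray GPU accesses hit harmless, valid memory instead of faulting,
 * roughly:
 *
 *    radeon_gart_set_page(rdev, t, rdev->dummy_page.addr);
 */
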
/* ATOM accessor methods */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
    struct radeon_device *rdev = info->dev->dev_private;
    uint32_t r;

    r = rdev->pll_rreg(rdev, reg);
    return r;
}

static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
    struct radeon_device *rdev = info->dev->dev_private;

    rdev->pll_wreg(rdev, reg, val);
}

static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
    struct radeon_device *rdev = info->dev->dev_private;
    uint32_t r;

    r = rdev->mc_rreg(rdev, reg);
    return r;
}

static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
    struct radeon_device *rdev = info->dev->dev_private;

    rdev->mc_wreg(rdev, reg, val);
}

static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
    struct radeon_device *rdev = info->dev->dev_private;

    WREG32(reg*4, val);
}

static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
    struct radeon_device *rdev = info->dev->dev_private;
    uint32_t r;

    r = RREG32(reg*4);
    return r;
}

static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
    struct radeon_device *rdev = info->dev->dev_private;

    WREG32_IO(reg*4, val);
}

static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
    struct radeon_device *rdev = info->dev->dev_private;
    uint32_t r;

    r = RREG32_IO(reg*4);
    return r;
}

int radeon_atombios_init(struct radeon_device *rdev)
{
    struct card_info *atom_card_info =
        kzalloc(sizeof(struct card_info), GFP_KERNEL);

    if (!atom_card_info)
        return -ENOMEM;

    rdev->mode_info.atom_card_info = atom_card_info;
    atom_card_info->dev = rdev->ddev;
    atom_card_info->reg_read = cail_reg_read;
    atom_card_info->reg_write = cail_reg_write;
    /* needed for iio ops */
    if (rdev->rio_mem) {
        atom_card_info->ioreg_read = cail_ioreg_read;
        atom_card_info->ioreg_write = cail_ioreg_write;
    } else {
        DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
        atom_card_info->ioreg_read = cail_reg_read;
        atom_card_info->ioreg_write = cail_reg_write;
    }
    atom_card_info->mc_read = cail_mc_read;
    atom_card_info->mc_write = cail_mc_write;
    atom_card_info->pll_read = cail_pll_read;
    atom_card_info->pll_write = cail_pll_write;

    rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
    mutex_init(&rdev->mode_info.atom_context->mutex);
    radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
    atom_allocate_fb_scratch(rdev->mode_info.atom_context);
    return 0;
}

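/*
 * Example (a hypothetical sketch): once atom_parse() has built the
 * context, BIOS command tables can be run through it, e.g.:
 *
 *    int index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll);
 *
 *    atom_execute_table(rdev->mode_info.atom_context, index,
 *                       (uint32_t *)&args);
 *
 * where args is that table's parameter struct; the table's register
 * accesses are serviced by the cail_* callbacks registered above.
 */
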
void radeon_atombios_fini(struct radeon_device *rdev)
{
    if (rdev->mode_info.atom_context) {
        kfree(rdev->mode_info.atom_context->scratch);
        kfree(rdev->mode_info.atom_context);
    }
    kfree(rdev->mode_info.atom_card_info);
}

int radeon_combios_init(struct radeon_device *rdev)
{
    radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
    return 0;
}

void radeon_combios_fini(struct radeon_device *rdev)
{
}

/* if we get transitioned to only one device, take VGA back */
static unsigned int radeon_vga_set_decode(void *cookie, bool state)
{
    struct radeon_device *rdev = cookie;
    radeon_vga_set_state(rdev, state);
    if (state)
        return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
               VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
    else
        return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

void radeon_check_arguments(struct radeon_device *rdev)
{
    /* vramlimit must be a power of two */
    switch (radeon_vram_limit) {
    case 0:
    case 4:
    case 8:
    case 16:
    case 32:
    case 64:
    case 128:
    case 256:
    case 512:
    case 1024:
    case 2048:
    case 4096:
        break;
    default:
        dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
                 radeon_vram_limit);
        radeon_vram_limit = 0;
        break;
    }
    radeon_vram_limit = radeon_vram_limit << 20;
    /* gtt size must be power of two and greater or equal to 32M */
    switch (radeon_gart_size) {
    case 4:
    case 8:
    case 16:
        dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
                 radeon_gart_size);
        radeon_gart_size = 512;
        break;
    case 32:
    case 64:
    case 128:
    case 256:
    case 512:
    case 1024:
    case 2048:
    case 4096:
        break;
    default:
        dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
                 radeon_gart_size);
        radeon_gart_size = 512;
        break;
    }
    rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
    /* AGP mode can only be -1, 1, 2, 4, 8 */
    switch (radeon_agpmode) {
    case -1:
    case 0:
    case 1:
    case 2:
    case 4:
    case 8:
        break;
    default:
        dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
                 "-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
        radeon_agpmode = 0;
        break;
    }
}

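/*
 * Note: aside from the bounds encoded in the case lists, the checks
 * above amount to power-of-two tests; a rough sketch of the vram limit
 * check using is_power_of_2() from <linux/log2.h> (shown only as an
 * alternative, not an exact behavioral match):
 *
 *    if (radeon_vram_limit && (!is_power_of_2(radeon_vram_limit) ||
 *                              radeon_vram_limit > 4096)) {
 *        dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
 *                 radeon_vram_limit);
 *        radeon_vram_limit = 0;
 *    }
 */
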
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
    struct drm_device *dev = pci_get_drvdata(pdev);
    pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
    if (state == VGA_SWITCHEROO_ON) {
        printk(KERN_INFO "radeon: switched on\n");
        /* don't suspend or resume card normally */
        dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
        radeon_resume_kms(dev);
        dev->switch_power_state = DRM_SWITCH_POWER_ON;
        drm_kms_helper_poll_enable(dev);
    } else {
        printk(KERN_INFO "radeon: switched off\n");
        drm_kms_helper_poll_disable(dev);
        dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
        radeon_suspend_kms(dev, pmm);
        dev->switch_power_state = DRM_SWITCH_POWER_OFF;
    }
}

static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
{
    struct drm_device *dev = pci_get_drvdata(pdev);
    bool can_switch;

    spin_lock(&dev->count_lock);
    can_switch = (dev->open_count == 0);
    spin_unlock(&dev->count_lock);
    return can_switch;
}

static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
    .set_gpu_state = radeon_switcheroo_set_state,
    .reprobe = NULL,
    .can_switch = radeon_switcheroo_can_switch,
};

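/*
 * This ops table is the point of the vga_switcheroo_client_ops change
 * named in the commit subject: rather than passing individual callbacks,
 * the client now hands vga_switcheroo a single struct. The matching
 * registration call appears in radeon_device_init() below:
 *
 *    vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops);
 */
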
int radeon_device_init(struct radeon_device *rdev,
                       struct drm_device *ddev,
                       struct pci_dev *pdev,
                       uint32_t flags)
{
    int r, i;
    int dma_bits;

    rdev->shutdown = false;
    rdev->dev = &pdev->dev;
    rdev->ddev = ddev;
    rdev->pdev = pdev;
    rdev->flags = flags;
    rdev->family = flags & RADEON_FAMILY_MASK;
    rdev->is_atom_bios = false;
    rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
    rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
    rdev->accel_working = false;

    DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
             radeon_family_name[rdev->family], pdev->vendor, pdev->device,
             pdev->subsystem_vendor, pdev->subsystem_device);

    /* mutex initialization is all done here so we
     * can recall these functions without locking issues */
    radeon_mutex_init(&rdev->cs_mutex);
    mutex_init(&rdev->ring_lock);
    mutex_init(&rdev->dc_hw_i2c_mutex);
    if (rdev->family >= CHIP_R600)
        spin_lock_init(&rdev->ih.lock);
    mutex_init(&rdev->gem.mutex);
    mutex_init(&rdev->pm.mutex);
    mutex_init(&rdev->vram_mutex);
    INIT_LIST_HEAD(&rdev->gem.objects);
    init_waitqueue_head(&rdev->irq.vblank_queue);
    init_waitqueue_head(&rdev->irq.idle_queue);
    /* initialize vm here */
    rdev->vm_manager.use_bitmap = 1;
    rdev->vm_manager.max_pfn = 1 << 20;
    INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);

    /* Set asic functions */
    r = radeon_asic_init(rdev);
    if (r)
        return r;
    radeon_check_arguments(rdev);

    /* all of the newer IGP chips have an internal gart
     * However some rs4xx report as AGP, so remove that here.
     */
    if ((rdev->family >= CHIP_RS400) &&
        (rdev->flags & RADEON_IS_IGP)) {
        rdev->flags &= ~RADEON_IS_AGP;
    }

    if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
        radeon_agp_disable(rdev);
    }

    /* set DMA mask + need_dma32 flags.
     * PCIE - can handle 40-bits.
     * IGP - can handle 40-bits
     * AGP - generally dma32 is safest
     * PCI - dma32 for legacy pci gart, 40 bits on newer asics
     */
    rdev->need_dma32 = false;
    if (rdev->flags & RADEON_IS_AGP)
        rdev->need_dma32 = true;
    if ((rdev->flags & RADEON_IS_PCI) &&
        (rdev->family < CHIP_RS400))
        rdev->need_dma32 = true;

    dma_bits = rdev->need_dma32 ? 32 : 40;
    r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
    if (r) {
        rdev->need_dma32 = true;
        dma_bits = 32;
        printk(KERN_WARNING "radeon: No suitable DMA available.\n");
    }
    r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
    if (r) {
        pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
        printk(KERN_WARNING "radeon: No coherent DMA available.\n");
    }

    /* Registers mapping */
    /* TODO: block userspace mapping of io register */
    rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
    rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
    rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
    if (rdev->rmmio == NULL) {
        return -ENOMEM;
    }
    DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
    DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);

    /* io port mapping */
    for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
        if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
            rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
            rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
            break;
        }
    }
    if (rdev->rio_mem == NULL)
        DRM_ERROR("Unable to find PCI I/O BAR\n");

    /* if we have > 1 VGA cards, then disable the radeon VGA resources */
    /* this will fail for cards that aren't VGA class devices, just
     * ignore it */
    vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
    vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops);

    r = radeon_init(rdev);
    if (r)
        return r;

    if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
        /* Acceleration not working on AGP card try again
         * with fallback to PCI or PCIE GART
         */
        radeon_asic_reset(rdev);
        radeon_fini(rdev);
        radeon_agp_disable(rdev);
        r = radeon_init(rdev);
        if (r)
            return r;
    }
    if ((radeon_testing & 1)) {
        radeon_test_moves(rdev);
    }
    if ((radeon_testing & 2)) {
        radeon_test_syncing(rdev);
    }
    if (radeon_benchmarking) {
        radeon_benchmark(rdev, radeon_benchmarking);
    }
    return 0;
}

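/*
 * Example (a sketch of the caller, which lives in radeon_kms.c rather
 * than in this file): the KMS load hook allocates rdev and hands the
 * DRM device, PCI device and family flags to radeon_device_init():
 *
 *    r = radeon_device_init(rdev, dev, dev->pdev, flags);
 *    if (r)
 *        dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
 */
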
static void radeon_debugfs_remove_files(struct radeon_device *rdev);

void radeon_device_fini(struct radeon_device *rdev)
{
    DRM_INFO("radeon: finishing device.\n");
    rdev->shutdown = true;
    /* evict vram memory */
    radeon_bo_evict_vram(rdev);
    radeon_fini(rdev);
    vga_switcheroo_unregister_client(rdev->pdev);
    vga_client_register(rdev->pdev, NULL, NULL, NULL);
    if (rdev->rio_mem)
        pci_iounmap(rdev->pdev, rdev->rio_mem);
    rdev->rio_mem = NULL;
    iounmap(rdev->rmmio);
    rdev->rmmio = NULL;
    radeon_debugfs_remove_files(rdev);
}

/*
 * Suspend & resume.
 */
int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
{
    struct radeon_device *rdev;
    struct drm_crtc *crtc;
    struct drm_connector *connector;
    int i, r;

    if (dev == NULL || dev->dev_private == NULL) {
        return -ENODEV;
    }
    if (state.event == PM_EVENT_PRETHAW) {
        return 0;
    }
    rdev = dev->dev_private;

    if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
        return 0;

    drm_kms_helper_poll_disable(dev);

    /* turn off display hw */
    list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
        drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
    }

    /* unpin the front buffers */
    list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
        struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
        struct radeon_bo *robj;

        if (rfb == NULL || rfb->obj == NULL) {
            continue;
        }
        robj = gem_to_radeon_bo(rfb->obj);
        /* don't unpin kernel fb objects */
        if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
            r = radeon_bo_reserve(robj, false);
            if (r == 0) {
                radeon_bo_unpin(robj);
                radeon_bo_unreserve(robj);
            }
        }
    }
    /* evict vram memory */
    radeon_bo_evict_vram(rdev);

    mutex_lock(&rdev->ring_lock);
    /* wait for gpu to finish processing current batch */
    for (i = 0; i < RADEON_NUM_RINGS; i++)
        radeon_fence_wait_empty_locked(rdev, i);
    mutex_unlock(&rdev->ring_lock);

    radeon_save_bios_scratch_regs(rdev);

    radeon_pm_suspend(rdev);
    radeon_suspend(rdev);
    radeon_hpd_fini(rdev);
    /* evict remaining vram memory */
    radeon_bo_evict_vram(rdev);

    radeon_agp_suspend(rdev);

    pci_save_state(dev->pdev);
    if (state.event == PM_EVENT_SUSPEND) {
        /* Shut down the device */
        pci_disable_device(dev->pdev);
        pci_set_power_state(dev->pdev, PCI_D3hot);
    }
    console_lock();
    radeon_fbdev_set_suspend(rdev, 1);
    console_unlock();
    return 0;
}

int radeon_resume_kms(struct drm_device *dev)
{
    struct drm_connector *connector;
    struct radeon_device *rdev = dev->dev_private;

    if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
        return 0;

    console_lock();
    pci_set_power_state(dev->pdev, PCI_D0);
    pci_restore_state(dev->pdev);
    if (pci_enable_device(dev->pdev)) {
        console_unlock();
        return -1;
    }
    /* resume AGP if in use */
    radeon_agp_resume(rdev);
    radeon_resume(rdev);
    radeon_pm_resume(rdev);
    radeon_restore_bios_scratch_regs(rdev);

    radeon_fbdev_set_suspend(rdev, 0);
    console_unlock();

    /* init dig PHYs, disp eng pll */
    if (rdev->is_atom_bios) {
        radeon_atom_encoder_init(rdev);
        radeon_atom_disp_eng_pll_init(rdev);
    }
    /* reset hpd state */
    radeon_hpd_init(rdev);
    /* blat the mode back in */
    drm_helper_resume_force_mode(dev);
    /* turn on display hw */
    list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
        drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
    }

    drm_kms_helper_poll_enable(dev);
    return 0;
}

int radeon_gpu_reset(struct radeon_device *rdev)
{
    int r;
    int resched;

    radeon_save_bios_scratch_regs(rdev);
    /* block TTM */
    resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
    radeon_suspend(rdev);

    r = radeon_asic_reset(rdev);
    if (!r) {
        dev_info(rdev->dev, "GPU reset succeeded\n");
        radeon_resume(rdev);
        radeon_restore_bios_scratch_regs(rdev);
        drm_helper_resume_force_mode(rdev->ddev);
        ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
    }

    if (r) {
        /* bad news, how to tell it to userspace ? */
        dev_info(rdev->dev, "GPU reset failed\n");
    }

    return r;
}

/*
 * Debugfs
 */
int radeon_debugfs_add_files(struct radeon_device *rdev,
                             struct drm_info_list *files,
                             unsigned nfiles)
{
    unsigned i;

    for (i = 0; i < rdev->debugfs_count; i++) {
        if (rdev->debugfs[i].files == files) {
            /* Already registered */
            return 0;
        }
    }

    i = rdev->debugfs_count + 1;
    if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
        DRM_ERROR("Reached maximum number of debugfs components.\n");
        DRM_ERROR("Report so we increase "
                  "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
        return -EINVAL;
    }
    rdev->debugfs[rdev->debugfs_count].files = files;
    rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
    rdev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
    drm_debugfs_create_files(files, nfiles,
                             rdev->ddev->control->debugfs_root,
                             rdev->ddev->control);
    drm_debugfs_create_files(files, nfiles,
                             rdev->ddev->primary->debugfs_root,
                             rdev->ddev->primary);
#endif
    return 0;
}

static void radeon_debugfs_remove_files(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
    unsigned i;

    for (i = 0; i < rdev->debugfs_count; i++) {
        drm_debugfs_remove_files(rdev->debugfs[i].files,
                                 rdev->debugfs[i].num_files,
                                 rdev->ddev->control);
        drm_debugfs_remove_files(rdev->debugfs[i].files,
                                 rdev->debugfs[i].num_files,
                                 rdev->ddev->primary);
    }
#endif
}

#if defined(CONFIG_DEBUG_FS)
int radeon_debugfs_init(struct drm_minor *minor)
{
    return 0;
}

void radeon_debugfs_cleanup(struct drm_minor *minor)
{
}
#endif