Merge tag 'batman-adv-fix-for-davem' of git://git.open-mesh.org/linux-merge
[deliverable/linux.git] / drivers / gpu / drm / radeon / radeon_device.c
CommitLineData
771fe6b9
JG
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include <linux/console.h>
5a0e3ad6 29#include <linux/slab.h>
771fe6b9
JG
30#include <drm/drmP.h>
31#include <drm/drm_crtc_helper.h>
32#include <drm/radeon_drm.h>
28d52043 33#include <linux/vgaarb.h>
6a9ee8af 34#include <linux/vga_switcheroo.h>
bcc65fd8 35#include <linux/efi.h>
771fe6b9
JG
36#include "radeon_reg.h"
37#include "radeon.h"
771fe6b9
JG
38#include "atom.h"
39
1b5331d9
JG
/*
 * Human-readable name for each radeon_family enum value, indexed by
 * family id (printed in the driver init message).  Entries are fixed
 * 16-byte strings; the table must stay in sync with enum radeon_family,
 * with "LAST" as the final sentinel entry.
 */
static const char radeon_family_name[][16] = {
	"R100",
	"RV100",
	"RS100",
	"RV200",
	"RS200",
	"R200",
	"RV250",
	"RS300",
	"RV280",
	"R300",
	"R350",
	"RV350",
	"RV380",
	"R420",
	"R423",
	"RV410",
	"RS400",
	"RS480",
	"RS600",
	"RS690",
	"RS740",
	"RV515",
	"R520",
	"RV530",
	"RV560",
	"RV570",
	"R580",
	"R600",
	"RV610",
	"RV630",
	"RV670",
	"RV620",
	"RV635",
	"RS780",
	"RS880",
	"RV770",
	"RV730",
	"RV710",
	"RV740",
	"CEDAR",
	"REDWOOD",
	"JUNIPER",
	"CYPRESS",
	"HEMLOCK",
	"PALM",
	"SUMO",
	"SUMO2",
	"BARTS",
	"TURKS",
	"CAICOS",
	"CAYMAN",
	"ARUBA",
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"LAST",
};
100
2e1b65f9
AD
101/**
102 * radeon_program_register_sequence - program an array of registers.
103 *
104 * @rdev: radeon_device pointer
105 * @registers: pointer to the register array
106 * @array_size: size of the register array
107 *
108 * Programs an array or registers with and and or masks.
109 * This is a helper for setting golden registers.
110 */
111void radeon_program_register_sequence(struct radeon_device *rdev,
112 const u32 *registers,
113 const u32 array_size)
114{
115 u32 tmp, reg, and_mask, or_mask;
116 int i;
117
118 if (array_size % 3)
119 return;
120
121 for (i = 0; i < array_size; i +=3) {
122 reg = registers[i + 0];
123 and_mask = registers[i + 1];
124 or_mask = registers[i + 2];
125
126 if (and_mask == 0xffffffff) {
127 tmp = or_mask;
128 } else {
129 tmp = RREG32(reg);
130 tmp &= ~and_mask;
131 tmp |= or_mask;
132 }
133 WREG32(reg, tmp);
134 }
135}
136
0c195119
AD
/**
 * radeon_surface_init - Clear GPU surface registers.
 *
 * @rdev: radeon_device pointer
 *
 * Clear GPU surface registers (r1xx-r5xx).  Slots that still have a
 * buffer object attached are passed to radeon_bo_get_surface_reg()
 * (presumably to re-establish their register state — confirm against
 * radeon_object.c); empty slots are cleared.  No-op for r600 and newer
 * families.
 */
void radeon_surface_init(struct radeon_device *rdev)
{
	/* FIXME: check this out */
	if (rdev->family < CHIP_R600) {
		int i;

		for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
			if (rdev->surface_regs[i].bo)
				radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
			else
				radeon_clear_surface_reg(rdev, i);
		}
		/* enable surfaces */
		WREG32(RADEON_SURFACE_CNTL, 0);
	}
}
160
771fe6b9
JG
161/*
162 * GPU scratch registers helpers function.
163 */
0c195119
AD
164/**
165 * radeon_scratch_init - Init scratch register driver information.
166 *
167 * @rdev: radeon_device pointer
168 *
169 * Init CP scratch register driver information (r1xx-r5xx)
170 */
3ce0a23d 171void radeon_scratch_init(struct radeon_device *rdev)
771fe6b9
JG
172{
173 int i;
174
175 /* FIXME: check this out */
176 if (rdev->family < CHIP_R300) {
177 rdev->scratch.num_reg = 5;
178 } else {
179 rdev->scratch.num_reg = 7;
180 }
724c80e1 181 rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
771fe6b9
JG
182 for (i = 0; i < rdev->scratch.num_reg; i++) {
183 rdev->scratch.free[i] = true;
724c80e1 184 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
771fe6b9
JG
185 }
186}
187
0c195119
AD
188/**
189 * radeon_scratch_get - Allocate a scratch register
190 *
191 * @rdev: radeon_device pointer
192 * @reg: scratch register mmio offset
193 *
194 * Allocate a CP scratch register for use by the driver (all asics).
195 * Returns 0 on success or -EINVAL on failure.
196 */
771fe6b9
JG
197int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
198{
199 int i;
200
201 for (i = 0; i < rdev->scratch.num_reg; i++) {
202 if (rdev->scratch.free[i]) {
203 rdev->scratch.free[i] = false;
204 *reg = rdev->scratch.reg[i];
205 return 0;
206 }
207 }
208 return -EINVAL;
209}
210
0c195119
AD
211/**
212 * radeon_scratch_free - Free a scratch register
213 *
214 * @rdev: radeon_device pointer
215 * @reg: scratch register mmio offset
216 *
217 * Free a CP scratch register allocated for use by the driver (all asics)
218 */
771fe6b9
JG
219void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
220{
221 int i;
222
223 for (i = 0; i < rdev->scratch.num_reg; i++) {
224 if (rdev->scratch.reg[i] == reg) {
225 rdev->scratch.free[i] = true;
226 return;
227 }
228 }
229}
230
0c195119
AD
/*
 * radeon_wb_*()
 * Writeback is the method by which the GPU updates special pages
 * in memory with the status of certain GPU events (fences, ring pointers,
 * etc.).
 */

/**
 * radeon_wb_disable - Disable Writeback
 *
 * @rdev: radeon_device pointer
 *
 * Disables Writeback (all asics). Used for suspend.
 * Unpins and unmaps the writeback BO but does not free it, so it can
 * be re-used on the next radeon_wb_init().
 */
void radeon_wb_disable(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj) {
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		/* give up silently if the BO can't be reserved;
		 * wb.enabled is left unchanged in that case */
		if (unlikely(r != 0))
			return;
		radeon_bo_kunmap(rdev->wb.wb_obj);
		radeon_bo_unpin(rdev->wb.wb_obj);
		radeon_bo_unreserve(rdev->wb.wb_obj);
	}
	rdev->wb.enabled = false;
}
259
0c195119
AD
260/**
261 * radeon_wb_fini - Disable Writeback and free memory
262 *
263 * @rdev: radeon_device pointer
264 *
265 * Disables Writeback and frees the Writeback memory (all asics).
266 * Used at driver shutdown.
267 */
724c80e1
AD
268void radeon_wb_fini(struct radeon_device *rdev)
269{
270 radeon_wb_disable(rdev);
271 if (rdev->wb.wb_obj) {
272 radeon_bo_unref(&rdev->wb.wb_obj);
273 rdev->wb.wb = NULL;
274 rdev->wb.wb_obj = NULL;
275 }
276}
277
0c195119
AD
/**
 * radeon_wb_init- Init Writeback driver info and allocate memory
 *
 * @rdev: radeon_device pointer
 *
 * Initializes Writeback and allocates the Writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or an -error on failure.
 */
int radeon_wb_init(struct radeon_device *rdev)
{
	int r;

	/* the BO survives radeon_wb_disable(), so only allocate once */
	if (rdev->wb.wb_obj == NULL) {
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, NULL, &rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}
	}
	r = radeon_bo_reserve(rdev->wb.wb_obj, false);
	if (unlikely(r != 0)) {
		radeon_wb_fini(rdev);
		return r;
	}
	r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
			  &rdev->wb.gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->wb.wb_obj);
		dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
		radeon_wb_fini(rdev);
		return r;
	}
	r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
	radeon_bo_unreserve(rdev->wb.wb_obj);
	if (r) {
		dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
		radeon_wb_fini(rdev);
		return r;
	}

	/* clear wb memory */
	memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
	/* disable event_write fences */
	rdev->wb.use_event = false;
	/* disabled via module param */
	if (radeon_no_wb == 1) {
		rdev->wb.enabled = false;
	} else {
		if (rdev->flags & RADEON_IS_AGP) {
			/* often unreliable on AGP */
			rdev->wb.enabled = false;
		} else if (rdev->family < CHIP_R300) {
			/* often unreliable on pre-r300 */
			rdev->wb.enabled = false;
		} else {
			rdev->wb.enabled = true;
			/* event_write fences are only available on r600+ */
			if (rdev->family >= CHIP_R600) {
				rdev->wb.use_event = true;
			}
		}
	}
	/* always use writeback/events on NI, APUs */
	if (rdev->family >= CHIP_PALM) {
		rdev->wb.enabled = true;
		rdev->wb.use_event = true;
	}

	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");

	return 0;
}
352
d594e46a
JG
/**
 * radeon_vram_location - try to find VRAM location
 * @rdev: radeon device structure holding all necessary informations
 * @mc: memory controller structure holding memory informations
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided as
 * parameter (which is so far either the PCI aperture address or,
 * for IGP, the TOM base address).
 *
 * If there is not enough space to fit the invisible VRAM in the 32bits
 * address space then we limit the VRAM size to the aperture.
 *
 * If we are using AGP and if the AGP aperture doesn't allow us to have
 * room for all the VRAM then we restrict the VRAM to the PCI aperture
 * size and print a warning.
 *
 * This function will never fail, worst case is limiting VRAM.
 *
 * Note: GTT start, end, size should be initialized before calling this
 * function on AGP platform.
 *
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size as on some boards we need to program the mc to
 * cover the whole aperture even if VRAM size is inferior to aperture size
 * Novell bug 204882 + along with lots of ubuntu ones
 *
 * Note: when limiting vram it's safe to overwrite real_vram_size because
 * we are not in the case where real_vram_size is inferior to mc_vram_size
 * (ie not affected by bogus hw of Novell bug 204882 + along with lots of
 * ubuntu ones)
 *
 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
	/* optional user-imposed cap from the radeon_vram_limit module
	 * parameter, converted from MiB to bytes (0 == no limit) */
	uint64_t limit = (uint64_t)radeon_vram_limit << 20;

	mc->vram_start = base;
	if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	/* on AGP, shrink VRAM if it would overlap the GTT range */
	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
			mc->mc_vram_size >> 20, mc->vram_start,
			mc->vram_end, mc->real_vram_size >> 20);
}
771fe6b9 417
d594e46a
JG
/**
 * radeon_gtt_location - try to find GTT location
 * @rdev: radeon device structure holding all necessary informations
 * @mc: memory controller structure holding memory informations
 *
 * Function will try to place GTT before or after VRAM, whichever
 * hole in the address space is larger.
 *
 * If GTT size is bigger than space left then we adjust GTT size.
 * Thus function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_af, size_bf;

	/* hole sizes after VRAM end and before VRAM start, rounded to
	 * the GTT base alignment */
	size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		if (mc->gtt_size > size_bf) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
	} else {
		if (mc->gtt_size > size_af) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}
453
771fe6b9
JG
/*
 * GPU helpers function.
 */
/**
 * radeon_card_posted - check if the hw has already been initialized
 *
 * @rdev: radeon_device pointer
 *
 * Check if the asic has been initialized (all asics): first look for
 * an enabled CRTC, then fall back to a non-zero memory-size register.
 * Used at driver startup.
 * Returns true if initialized or false if not.
 */
bool radeon_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* required for EFI mode on macbook2,1 which uses an r5xx asic */
	if (efi_enabled(EFI_BOOT) &&
	    (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
	    (rdev->family < CHIP_R600))
		return false;

	/* no display engine: skip the CRTC checks entirely */
	if (ASIC_IS_NODCE(rdev))
		goto check_memsize;

	/* first check CRTCs */
	if (ASIC_IS_DCE4(rdev)) {
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
		if (rdev->num_crtc >= 4) {
			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
				RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
		}
		if (rdev->num_crtc >= 6) {
			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
				RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
		}
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_AVIVO(rdev)) {
		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
		      RREG32(AVIVO_D2CRTC_CONTROL);
		if (reg & AVIVO_CRTC_EN) {
			return true;
		}
	} else {
		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
		      RREG32(RADEON_CRTC2_GEN_CNTL);
		if (reg & RADEON_CRTC_EN) {
			return true;
		}
	}

check_memsize:
	/* then check MEM_SIZE, in case the crtcs are off */
	if (rdev->family >= CHIP_R600)
		reg = RREG32(R600_CONFIG_MEMSIZE);
	else
		reg = RREG32(RADEON_CONFIG_MEMSIZE);

	if (reg)
		return true;

	return false;

}
520
0c195119
AD
521/**
522 * radeon_update_bandwidth_info - update display bandwidth params
523 *
524 * @rdev: radeon_device pointer
525 *
526 * Used when sclk/mclk are switched or display modes are set.
527 * params are used to calculate display watermarks (all asics)
528 */
f47299c5
AD
529void radeon_update_bandwidth_info(struct radeon_device *rdev)
530{
531 fixed20_12 a;
8807286e
AD
532 u32 sclk = rdev->pm.current_sclk;
533 u32 mclk = rdev->pm.current_mclk;
f47299c5 534
8807286e
AD
535 /* sclk/mclk in Mhz */
536 a.full = dfixed_const(100);
537 rdev->pm.sclk.full = dfixed_const(sclk);
538 rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
539 rdev->pm.mclk.full = dfixed_const(mclk);
540 rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
f47299c5 541
8807286e 542 if (rdev->flags & RADEON_IS_IGP) {
68adac5e 543 a.full = dfixed_const(16);
f47299c5 544 /* core_bandwidth = sclk(Mhz) * 16 */
68adac5e 545 rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
f47299c5
AD
546 }
547}
548
0c195119
AD
549/**
550 * radeon_boot_test_post_card - check and possibly initialize the hw
551 *
552 * @rdev: radeon_device pointer
553 *
554 * Check if the asic is initialized and if not, attempt to initialize
555 * it (all asics).
556 * Returns true if initialized or false if not.
557 */
72542d77
DA
558bool radeon_boot_test_post_card(struct radeon_device *rdev)
559{
560 if (radeon_card_posted(rdev))
561 return true;
562
563 if (rdev->bios) {
564 DRM_INFO("GPU not posted. posting now...\n");
565 if (rdev->is_atom_bios)
566 atom_asic_init(rdev->mode_info.atom_context);
567 else
568 radeon_combios_asic_init(rdev->ddev);
569 return true;
570 } else {
571 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
572 return false;
573 }
574}
575
0c195119
AD
/**
 * radeon_dummy_page_init - init dummy page used by the driver
 *
 * @rdev: radeon_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for gart entries
 * when pages are taken out of the GART.
 * Safe to call repeatedly; an already-allocated page is kept.
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_dummy_page_init(struct radeon_device *rdev)
{
	/* already allocated by an earlier call */
	if (rdev->dummy_page.page)
		return 0;
	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (rdev->dummy_page.page == NULL)
		return -ENOMEM;
	rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
					0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
		dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(rdev->dummy_page.page);
		rdev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	return 0;
}
603
0c195119
AD
604/**
605 * radeon_dummy_page_fini - free dummy page used by the driver
606 *
607 * @rdev: radeon_device pointer
608 *
609 * Frees the dummy page used by the driver (all asics).
610 */
3ce0a23d
JG
611void radeon_dummy_page_fini(struct radeon_device *rdev)
612{
613 if (rdev->dummy_page.page == NULL)
614 return;
615 pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
616 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
617 __free_page(rdev->dummy_page.page);
618 rdev->dummy_page.page = NULL;
619}
620
771fe6b9 621
771fe6b9 622/* ATOM accessor methods */
0c195119
AD
623/*
624 * ATOM is an interpreted byte code stored in tables in the vbios. The
625 * driver registers callbacks to access registers and the interpreter
626 * in the driver parses the tables and executes then to program specific
627 * actions (set display modes, asic init, etc.). See radeon_atombios.c,
628 * atombios.h, and atom.c
629 */
630
631/**
632 * cail_pll_read - read PLL register
633 *
634 * @info: atom card_info pointer
635 * @reg: PLL register offset
636 *
637 * Provides a PLL register accessor for the atom interpreter (r4xx+).
638 * Returns the value of the PLL register.
639 */
771fe6b9
JG
640static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
641{
642 struct radeon_device *rdev = info->dev->dev_private;
643 uint32_t r;
644
645 r = rdev->pll_rreg(rdev, reg);
646 return r;
647}
648
0c195119
AD
/**
 * cail_pll_write - write PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 * @val: value to write to the PLL register
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->pll_wreg(rdev, reg, val);
}
664
0c195119
AD
665/**
666 * cail_mc_read - read MC (Memory Controller) register
667 *
668 * @info: atom card_info pointer
669 * @reg: MC register offset
670 *
671 * Provides an MC register accessor for the atom interpreter (r4xx+).
672 * Returns the value of the MC register.
673 */
771fe6b9
JG
674static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
675{
676 struct radeon_device *rdev = info->dev->dev_private;
677 uint32_t r;
678
679 r = rdev->mc_rreg(rdev, reg);
680 return r;
681}
682
0c195119
AD
/**
 * cail_mc_write - write MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 * @val: value to write to the MC register
 *
 * Provides a MC register accessor for the atom interpreter (r4xx+).
 */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->mc_wreg(rdev, reg, val);
}
698
0c195119
AD
/**
 * cail_reg_write - write MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset (in dwords; converted to bytes below)
 * @val: value to write to the MMIO register
 *
 * Provides a MMIO register accessor for the atom interpreter (r4xx+).
 */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32(reg*4, val);
}
714
0c195119
AD
715/**
716 * cail_reg_read - read MMIO register
717 *
718 * @info: atom card_info pointer
719 * @reg: MMIO register offset
720 *
721 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
722 * Returns the value of the MMIO register.
723 */
771fe6b9
JG
724static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
725{
726 struct radeon_device *rdev = info->dev->dev_private;
727 uint32_t r;
728
729 r = RREG32(reg*4);
730 return r;
731}
732
0c195119
AD
/**
 * cail_ioreg_write - write IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset (in dwords; converted to bytes below)
 * @val: value to write to the IO register
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 */
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32_IO(reg*4, val);
}
748
0c195119
AD
749/**
750 * cail_ioreg_read - read IO register
751 *
752 * @info: atom card_info pointer
753 * @reg: IO register offset
754 *
755 * Provides an IO register accessor for the atom interpreter (r4xx+).
756 * Returns the value of the IO register.
757 */
351a52a2
AD
758static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
759{
760 struct radeon_device *rdev = info->dev->dev_private;
761 uint32_t r;
762
763 r = RREG32_IO(reg*4);
764 return r;
765}
766
0c195119
AD
/**
 * radeon_atombios_init - init the driver info and callbacks for atombios
 *
 * @rdev: radeon_device pointer
 *
 * Initializes the driver info and register access callbacks for the
 * ATOM interpreter (r4xx+).
 * Returns 0 on success, -ENOMEM on failure.
 * Called at driver startup.
 */
int radeon_atombios_init(struct radeon_device *rdev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	rdev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = rdev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops */
	if (rdev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		/* no I/O BAR mapped: fall back to the MMIO accessors */
		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
	if (!rdev->mode_info.atom_context) {
		/* atom_parse failed: free the card_info allocated above */
		radeon_atombios_fini(rdev);
		return -ENOMEM;
	}

	mutex_init(&rdev->mode_info.atom_context->mutex);
	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
	return 0;
}
814
0c195119
AD
815/**
816 * radeon_atombios_fini - free the driver info and callbacks for atombios
817 *
818 * @rdev: radeon_device pointer
819 *
820 * Frees the driver info and register access callbacks for the ATOM
821 * interpreter (r4xx+).
822 * Called at driver shutdown.
823 */
771fe6b9
JG
824void radeon_atombios_fini(struct radeon_device *rdev)
825{
4a04a844
JG
826 if (rdev->mode_info.atom_context) {
827 kfree(rdev->mode_info.atom_context->scratch);
4a04a844 828 }
0e34d094
TG
829 kfree(rdev->mode_info.atom_context);
830 rdev->mode_info.atom_context = NULL;
61c4b24b 831 kfree(rdev->mode_info.atom_card_info);
0e34d094 832 rdev->mode_info.atom_card_info = NULL;
771fe6b9
JG
833}
834
0c195119
AD
/* COMBIOS */
/*
 * COMBIOS is the bios format prior to ATOM. It provides
 * command tables similar to ATOM, but doesn't have a unified
 * parser. See radeon_combios.c
 */

/**
 * radeon_combios_init - init the driver info for combios
 *
 * @rdev: radeon_device pointer
 *
 * Initializes the driver info for combios (r1xx-r3xx).
 * Returns 0 on success.
 * Called at driver startup.
 */
int radeon_combios_init(struct radeon_device *rdev)
{
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
	return 0;
}
856
0c195119
AD
/**
 * radeon_combios_fini - free the driver info for combios
 *
 * @rdev: radeon_device pointer
 *
 * Frees the driver info for combios (r1xx-r3xx).
 * Called at driver shutdown.
 * Intentionally empty: radeon_combios_init() allocates nothing that
 * would need freeing here.
 */
void radeon_combios_fini(struct radeon_device *rdev)
{
}
868
0c195119
AD
869/* if we get transitioned to only one device, take VGA back */
870/**
871 * radeon_vga_set_decode - enable/disable vga decode
872 *
873 * @cookie: radeon_device pointer
874 * @state: enable/disable vga decode
875 *
876 * Enable/disable vga decode (all asics).
877 * Returns VGA resource flags.
878 */
28d52043
DA
879static unsigned int radeon_vga_set_decode(void *cookie, bool state)
880{
881 struct radeon_device *rdev = cookie;
28d52043
DA
882 radeon_vga_set_state(rdev, state);
883 if (state)
884 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
885 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
886 else
887 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
888}
c1176d6f 889
1bcb04f7
CK
/**
 * radeon_check_pot_argument - check that argument is a power of two
 *
 * @arg: value to check
 *
 * Validates that a certain argument is a power of two (all asics).
 * Note: zero is also accepted (callers treat 0 as "disabled").
 * Returns true if argument is valid.
 */
static bool radeon_check_pot_argument(int arg)
{
	return !(arg & (arg - 1));
}
902
0c195119
AD
/**
 * radeon_check_arguments - validate module params
 *
 * @rdev: radeon_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 * Invalid values are warned about and replaced with safe defaults.
 */
static void radeon_check_arguments(struct radeon_device *rdev)
{
	/* vramlimit must be a power of two */
	if (!radeon_check_pot_argument(radeon_vram_limit)) {
		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
				radeon_vram_limit);
		radeon_vram_limit = 0; /* 0 == no limit */
	}

	/* gtt size must be power of two and greater or equal to 32M */
	if (radeon_gart_size < 32) {
		dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
				radeon_gart_size);
		radeon_gart_size = 512;

	} else if (!radeon_check_pot_argument(radeon_gart_size)) {
		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
				radeon_gart_size);
		radeon_gart_size = 512;
	}
	/* module param is in MiB; convert to bytes */
	rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;

	/* AGP mode can only be -1, 1, 2, 4, 8 */
	switch (radeon_agpmode) {
	case -1:
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
				"-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
		radeon_agpmode = 0;
		break;
	}
}
949
d1f9809e
ML
950/**
951 * radeon_switcheroo_quirk_long_wakeup - return true if longer d3 delay is
952 * needed for waking up.
953 *
954 * @pdev: pci dev pointer
955 */
956static bool radeon_switcheroo_quirk_long_wakeup(struct pci_dev *pdev)
957{
958
959 /* 6600m in a macbook pro */
960 if (pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
961 pdev->subsystem_device == 0x00e2) {
962 printk(KERN_INFO "radeon: quirking longer d3 wakeup delay\n");
963 return true;
964 }
965
966 return false;
967}
968
0c195119
AD
/**
 * radeon_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes the
 * asic before or after it is powered up using ACPI methods.
 */
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
	if (state == VGA_SWITCHEROO_ON) {
		/* stash the configured delay so it can be restored below */
		unsigned d3_delay = dev->pdev->d3_delay;

		printk(KERN_INFO "radeon: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		/* quirked machines need a longer d3 delay to wake reliably */
		if (d3_delay < 20 && radeon_switcheroo_quirk_long_wakeup(pdev))
			dev->pdev->d3_delay = 20;

		radeon_resume_kms(dev);

		dev->pdev->d3_delay = d3_delay;

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		printk(KERN_INFO "radeon: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		radeon_suspend_kms(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}
1006
0c195119
AD
1007/**
1008 * radeon_switcheroo_can_switch - see if switcheroo state can change
1009 *
1010 * @pdev: pci dev pointer
1011 *
1012 * Callback for the switcheroo driver. Check of the switcheroo
1013 * state can be changed.
1014 * Returns true if the state can be changed, false if not.
1015 */
6a9ee8af
DA
1016static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
1017{
1018 struct drm_device *dev = pci_get_drvdata(pdev);
1019 bool can_switch;
1020
1021 spin_lock(&dev->count_lock);
1022 can_switch = (dev->open_count == 0);
1023 spin_unlock(&dev->count_lock);
1024 return can_switch;
1025}
1026
26ec685f
TI
1027static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
1028 .set_gpu_state = radeon_switcheroo_set_state,
1029 .reprobe = NULL,
1030 .can_switch = radeon_switcheroo_can_switch,
1031};
6a9ee8af 1032
0c195119
AD
1033/**
1034 * radeon_device_init - initialize the driver
1035 *
1036 * @rdev: radeon_device pointer
1037 * @pdev: drm dev pointer
1038 * @pdev: pci dev pointer
1039 * @flags: driver flags
1040 *
1041 * Initializes the driver info and hw (all asics).
1042 * Returns 0 for success or an error on failure.
1043 * Called at driver startup.
1044 */
771fe6b9
JG
1045int radeon_device_init(struct radeon_device *rdev,
1046 struct drm_device *ddev,
1047 struct pci_dev *pdev,
1048 uint32_t flags)
1049{
351a52a2 1050 int r, i;
ad49f501 1051 int dma_bits;
771fe6b9 1052
771fe6b9 1053 rdev->shutdown = false;
9f022ddf 1054 rdev->dev = &pdev->dev;
771fe6b9
JG
1055 rdev->ddev = ddev;
1056 rdev->pdev = pdev;
1057 rdev->flags = flags;
1058 rdev->family = flags & RADEON_FAMILY_MASK;
1059 rdev->is_atom_bios = false;
1060 rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
1061 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
733289c2 1062 rdev->accel_working = false;
8b25ed34
AD
1063 /* set up ring ids */
1064 for (i = 0; i < RADEON_NUM_RINGS; i++) {
1065 rdev->ring[i].idx = i;
1066 }
1b5331d9 1067
d522d9cc
TR
1068 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
1069 radeon_family_name[rdev->family], pdev->vendor, pdev->device,
1070 pdev->subsystem_vendor, pdev->subsystem_device);
1b5331d9 1071
771fe6b9
JG
1072 /* mutex initialization are all done here so we
1073 * can recall function without having locking issues */
d6999bc7 1074 mutex_init(&rdev->ring_lock);
40bacf16 1075 mutex_init(&rdev->dc_hw_i2c_mutex);
c20dc369 1076 atomic_set(&rdev->ih.lock, 0);
4c788679 1077 mutex_init(&rdev->gem.mutex);
c913e23a 1078 mutex_init(&rdev->pm.mutex);
6759a0a7 1079 mutex_init(&rdev->gpu_clock_mutex);
db7fce39 1080 init_rwsem(&rdev->pm.mclk_lock);
dee53e7f 1081 init_rwsem(&rdev->exclusive_lock);
73a6d3fc 1082 init_waitqueue_head(&rdev->irq.vblank_queue);
1b9c3dd0
AD
1083 r = radeon_gem_init(rdev);
1084 if (r)
1085 return r;
721604a1 1086 /* initialize vm here */
36ff39c4 1087 mutex_init(&rdev->vm_manager.lock);
23d4f1f2
AD
1088 /* Adjust VM size here.
1089 * Currently set to 4GB ((1 << 20) 4k pages).
1090 * Max GPUVM size for cayman and SI is 40 bits.
1091 */
721604a1
JG
1092 rdev->vm_manager.max_pfn = 1 << 20;
1093 INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);
771fe6b9 1094
4aac0473
JG
1095 /* Set asic functions */
1096 r = radeon_asic_init(rdev);
36421338 1097 if (r)
4aac0473 1098 return r;
36421338 1099 radeon_check_arguments(rdev);
4aac0473 1100
f95df9ca
AD
1101 /* all of the newer IGP chips have an internal gart
1102 * However some rs4xx report as AGP, so remove that here.
1103 */
1104 if ((rdev->family >= CHIP_RS400) &&
1105 (rdev->flags & RADEON_IS_IGP)) {
1106 rdev->flags &= ~RADEON_IS_AGP;
1107 }
1108
30256a3f 1109 if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
b574f251 1110 radeon_agp_disable(rdev);
771fe6b9
JG
1111 }
1112
9ed8b1f9
AD
1113 /* Set the internal MC address mask
1114 * This is the max address of the GPU's
1115 * internal address space.
1116 */
1117 if (rdev->family >= CHIP_CAYMAN)
1118 rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
1119 else if (rdev->family >= CHIP_CEDAR)
1120 rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
1121 else
1122 rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */
1123
ad49f501
DA
1124 /* set DMA mask + need_dma32 flags.
1125 * PCIE - can handle 40-bits.
005a83f1 1126 * IGP - can handle 40-bits
ad49f501 1127 * AGP - generally dma32 is safest
005a83f1 1128 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
ad49f501
DA
1129 */
1130 rdev->need_dma32 = false;
1131 if (rdev->flags & RADEON_IS_AGP)
1132 rdev->need_dma32 = true;
005a83f1 1133 if ((rdev->flags & RADEON_IS_PCI) &&
4a2b6662 1134 (rdev->family <= CHIP_RS740))
ad49f501
DA
1135 rdev->need_dma32 = true;
1136
1137 dma_bits = rdev->need_dma32 ? 32 : 40;
1138 r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
771fe6b9 1139 if (r) {
62fff811 1140 rdev->need_dma32 = true;
c52494f6 1141 dma_bits = 32;
771fe6b9
JG
1142 printk(KERN_WARNING "radeon: No suitable DMA available.\n");
1143 }
c52494f6
KRW
1144 r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
1145 if (r) {
1146 pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
1147 printk(KERN_WARNING "radeon: No coherent DMA available.\n");
1148 }
771fe6b9
JG
1149
1150 /* Registers mapping */
1151 /* TODO: block userspace mapping of io register */
2c385151 1152 spin_lock_init(&rdev->mmio_idx_lock);
01d73a69
JC
1153 rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
1154 rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
771fe6b9
JG
1155 rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
1156 if (rdev->rmmio == NULL) {
1157 return -ENOMEM;
1158 }
1159 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
1160 DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);
1161
351a52a2
AD
1162 /* io port mapping */
1163 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1164 if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
1165 rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
1166 rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
1167 break;
1168 }
1169 }
1170 if (rdev->rio_mem == NULL)
1171 DRM_ERROR("Unable to find PCI I/O BAR\n");
1172
28d52043 1173 /* if we have > 1 VGA cards, then disable the radeon VGA resources */
93239ea1
DA
1174 /* this will fail for cards that aren't VGA class devices, just
1175 * ignore it */
1176 vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
26ec685f 1177 vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops);
28d52043 1178
3ce0a23d 1179 r = radeon_init(rdev);
b574f251 1180 if (r)
3ce0a23d 1181 return r;
3ce0a23d 1182
04eb2206
CK
1183 r = radeon_ib_ring_tests(rdev);
1184 if (r)
1185 DRM_ERROR("ib ring test failed (%d).\n", r);
1186
409851f4
JG
1187 r = radeon_gem_debugfs_init(rdev);
1188 if (r) {
1189 DRM_ERROR("registering gem debugfs failed (%d).\n", r);
1190 }
1191
b574f251
JG
1192 if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
1193 /* Acceleration not working on AGP card try again
1194 * with fallback to PCI or PCIE GART
1195 */
a2d07b74 1196 radeon_asic_reset(rdev);
b574f251
JG
1197 radeon_fini(rdev);
1198 radeon_agp_disable(rdev);
1199 r = radeon_init(rdev);
4aac0473
JG
1200 if (r)
1201 return r;
771fe6b9 1202 }
60a7e396 1203 if ((radeon_testing & 1)) {
ecc0b326
MD
1204 radeon_test_moves(rdev);
1205 }
60a7e396
CK
1206 if ((radeon_testing & 2)) {
1207 radeon_test_syncing(rdev);
1208 }
771fe6b9 1209 if (radeon_benchmarking) {
638dd7db 1210 radeon_benchmark(rdev, radeon_benchmarking);
771fe6b9 1211 }
6cf8a3f5 1212 return 0;
771fe6b9
JG
1213}
1214
4d8bf9ae
CK
1215static void radeon_debugfs_remove_files(struct radeon_device *rdev);
1216
0c195119
AD
1217/**
1218 * radeon_device_fini - tear down the driver
1219 *
1220 * @rdev: radeon_device pointer
1221 *
1222 * Tear down the driver info (all asics).
1223 * Called at driver shutdown.
1224 */
771fe6b9
JG
1225void radeon_device_fini(struct radeon_device *rdev)
1226{
771fe6b9
JG
1227 DRM_INFO("radeon: finishing device.\n");
1228 rdev->shutdown = true;
90aca4d2
JG
1229 /* evict vram memory */
1230 radeon_bo_evict_vram(rdev);
62a8ea3f 1231 radeon_fini(rdev);
6a9ee8af 1232 vga_switcheroo_unregister_client(rdev->pdev);
c1176d6f 1233 vga_client_register(rdev->pdev, NULL, NULL, NULL);
e0a2ca73
AD
1234 if (rdev->rio_mem)
1235 pci_iounmap(rdev->pdev, rdev->rio_mem);
351a52a2 1236 rdev->rio_mem = NULL;
771fe6b9
JG
1237 iounmap(rdev->rmmio);
1238 rdev->rmmio = NULL;
4d8bf9ae 1239 radeon_debugfs_remove_files(rdev);
771fe6b9
JG
1240}
1241
1242
1243/*
1244 * Suspend & resume.
1245 */
0c195119
AD
1246/**
1247 * radeon_suspend_kms - initiate device suspend
1248 *
1249 * @pdev: drm dev pointer
1250 * @state: suspend state
1251 *
1252 * Puts the hw in the suspend state (all asics).
1253 * Returns 0 for success or an error on failure.
1254 * Called at driver suspend.
1255 */
771fe6b9
JG
1256int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
1257{
875c1866 1258 struct radeon_device *rdev;
771fe6b9 1259 struct drm_crtc *crtc;
d8dcaa1d 1260 struct drm_connector *connector;
7465280c 1261 int i, r;
5f8f635e 1262 bool force_completion = false;
771fe6b9 1263
875c1866 1264 if (dev == NULL || dev->dev_private == NULL) {
771fe6b9
JG
1265 return -ENODEV;
1266 }
1267 if (state.event == PM_EVENT_PRETHAW) {
1268 return 0;
1269 }
875c1866
DJ
1270 rdev = dev->dev_private;
1271
5bcf719b 1272 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
6a9ee8af 1273 return 0;
d8dcaa1d 1274
86698c20
SF
1275 drm_kms_helper_poll_disable(dev);
1276
d8dcaa1d
AD
1277 /* turn off display hw */
1278 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1279 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
1280 }
1281
771fe6b9
JG
1282 /* unpin the front buffers */
1283 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1284 struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
4c788679 1285 struct radeon_bo *robj;
771fe6b9
JG
1286
1287 if (rfb == NULL || rfb->obj == NULL) {
1288 continue;
1289 }
7e4d15d9 1290 robj = gem_to_radeon_bo(rfb->obj);
38651674
DA
1291 /* don't unpin kernel fb objects */
1292 if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
4c788679 1293 r = radeon_bo_reserve(robj, false);
38651674 1294 if (r == 0) {
4c788679
JG
1295 radeon_bo_unpin(robj);
1296 radeon_bo_unreserve(robj);
1297 }
771fe6b9
JG
1298 }
1299 }
1300 /* evict vram memory */
4c788679 1301 radeon_bo_evict_vram(rdev);
8a47cc9e
CK
1302
1303 mutex_lock(&rdev->ring_lock);
771fe6b9 1304 /* wait for gpu to finish processing current batch */
5f8f635e
JG
1305 for (i = 0; i < RADEON_NUM_RINGS; i++) {
1306 r = radeon_fence_wait_empty_locked(rdev, i);
1307 if (r) {
1308 /* delay GPU reset to resume */
1309 force_completion = true;
1310 }
1311 }
1312 if (force_completion) {
1313 radeon_fence_driver_force_completion(rdev);
1314 }
8a47cc9e 1315 mutex_unlock(&rdev->ring_lock);
771fe6b9 1316
f657c2a7
YZ
1317 radeon_save_bios_scratch_regs(rdev);
1318
ce8f5370 1319 radeon_pm_suspend(rdev);
62a8ea3f 1320 radeon_suspend(rdev);
d4877cf2 1321 radeon_hpd_fini(rdev);
771fe6b9 1322 /* evict remaining vram memory */
4c788679 1323 radeon_bo_evict_vram(rdev);
771fe6b9 1324
10b06122
JG
1325 radeon_agp_suspend(rdev);
1326
771fe6b9
JG
1327 pci_save_state(dev->pdev);
1328 if (state.event == PM_EVENT_SUSPEND) {
1329 /* Shut down the device */
1330 pci_disable_device(dev->pdev);
1331 pci_set_power_state(dev->pdev, PCI_D3hot);
1332 }
ac751efa 1333 console_lock();
38651674 1334 radeon_fbdev_set_suspend(rdev, 1);
ac751efa 1335 console_unlock();
771fe6b9
JG
1336 return 0;
1337}
1338
0c195119
AD
1339/**
1340 * radeon_resume_kms - initiate device resume
1341 *
1342 * @pdev: drm dev pointer
1343 *
1344 * Bring the hw back to operating state (all asics).
1345 * Returns 0 for success or an error on failure.
1346 * Called at driver resume.
1347 */
771fe6b9
JG
1348int radeon_resume_kms(struct drm_device *dev)
1349{
09bdf591 1350 struct drm_connector *connector;
771fe6b9 1351 struct radeon_device *rdev = dev->dev_private;
04eb2206 1352 int r;
771fe6b9 1353
5bcf719b 1354 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
6a9ee8af
DA
1355 return 0;
1356
ac751efa 1357 console_lock();
771fe6b9
JG
1358 pci_set_power_state(dev->pdev, PCI_D0);
1359 pci_restore_state(dev->pdev);
1360 if (pci_enable_device(dev->pdev)) {
ac751efa 1361 console_unlock();
771fe6b9
JG
1362 return -1;
1363 }
0ebf1717
DA
1364 /* resume AGP if in use */
1365 radeon_agp_resume(rdev);
62a8ea3f 1366 radeon_resume(rdev);
04eb2206
CK
1367
1368 r = radeon_ib_ring_tests(rdev);
1369 if (r)
1370 DRM_ERROR("ib ring test failed (%d).\n", r);
1371
ce8f5370 1372 radeon_pm_resume(rdev);
f657c2a7 1373 radeon_restore_bios_scratch_regs(rdev);
09bdf591 1374
38651674 1375 radeon_fbdev_set_suspend(rdev, 0);
ac751efa 1376 console_unlock();
771fe6b9 1377
3fa47d9e
AD
1378 /* init dig PHYs, disp eng pll */
1379 if (rdev->is_atom_bios) {
ac89af1e 1380 radeon_atom_encoder_init(rdev);
f3f1f03e 1381 radeon_atom_disp_eng_pll_init(rdev);
bced76f2
AD
1382 /* turn on the BL */
1383 if (rdev->mode_info.bl_encoder) {
1384 u8 bl_level = radeon_get_backlight_level(rdev,
1385 rdev->mode_info.bl_encoder);
1386 radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
1387 bl_level);
1388 }
3fa47d9e 1389 }
d4877cf2
AD
1390 /* reset hpd state */
1391 radeon_hpd_init(rdev);
771fe6b9
JG
1392 /* blat the mode back in */
1393 drm_helper_resume_force_mode(dev);
a93f344d
AD
1394 /* turn on display hw */
1395 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1396 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
1397 }
86698c20
SF
1398
1399 drm_kms_helper_poll_enable(dev);
771fe6b9
JG
1400 return 0;
1401}
1402
0c195119
AD
1403/**
1404 * radeon_gpu_reset - reset the asic
1405 *
1406 * @rdev: radeon device pointer
1407 *
1408 * Attempt the reset the GPU if it has hung (all asics).
1409 * Returns 0 for success or an error on failure.
1410 */
90aca4d2
JG
1411int radeon_gpu_reset(struct radeon_device *rdev)
1412{
55d7c221
CK
1413 unsigned ring_sizes[RADEON_NUM_RINGS];
1414 uint32_t *ring_data[RADEON_NUM_RINGS];
1415
1416 bool saved = false;
1417
1418 int i, r;
8fd1b84c 1419 int resched;
90aca4d2 1420
dee53e7f 1421 down_write(&rdev->exclusive_lock);
90aca4d2 1422 radeon_save_bios_scratch_regs(rdev);
8fd1b84c
DA
1423 /* block TTM */
1424 resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
90aca4d2
JG
1425 radeon_suspend(rdev);
1426
55d7c221
CK
1427 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1428 ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
1429 &ring_data[i]);
1430 if (ring_sizes[i]) {
1431 saved = true;
1432 dev_info(rdev->dev, "Saved %d dwords of commands "
1433 "on ring %d.\n", ring_sizes[i], i);
1434 }
1435 }
1436
1437retry:
90aca4d2
JG
1438 r = radeon_asic_reset(rdev);
1439 if (!r) {
55d7c221 1440 dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
90aca4d2 1441 radeon_resume(rdev);
55d7c221 1442 }
04eb2206 1443
55d7c221 1444 radeon_restore_bios_scratch_regs(rdev);
04eb2206 1445
55d7c221
CK
1446 if (!r) {
1447 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1448 radeon_ring_restore(rdev, &rdev->ring[i],
1449 ring_sizes[i], ring_data[i]);
f54b350d
CK
1450 ring_sizes[i] = 0;
1451 ring_data[i] = NULL;
55d7c221
CK
1452 }
1453
1454 r = radeon_ib_ring_tests(rdev);
1455 if (r) {
1456 dev_err(rdev->dev, "ib ring test failed (%d).\n", r);
1457 if (saved) {
f54b350d 1458 saved = false;
55d7c221
CK
1459 radeon_suspend(rdev);
1460 goto retry;
1461 }
1462 }
1463 } else {
76903b96 1464 radeon_fence_driver_force_completion(rdev);
55d7c221
CK
1465 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1466 kfree(ring_data[i]);
1467 }
90aca4d2 1468 }
7a1619b9 1469
d3493574
JG
1470 drm_helper_resume_force_mode(rdev->ddev);
1471
55d7c221 1472 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
7a1619b9
MD
1473 if (r) {
1474 /* bad news, how to tell it to userspace ? */
1475 dev_info(rdev->dev, "GPU reset failed\n");
1476 }
1477
dee53e7f 1478 up_write(&rdev->exclusive_lock);
90aca4d2
JG
1479 return r;
1480}
1481
771fe6b9
JG
1482
1483/*
1484 * Debugfs
1485 */
771fe6b9
JG
1486int radeon_debugfs_add_files(struct radeon_device *rdev,
1487 struct drm_info_list *files,
1488 unsigned nfiles)
1489{
1490 unsigned i;
1491
4d8bf9ae
CK
1492 for (i = 0; i < rdev->debugfs_count; i++) {
1493 if (rdev->debugfs[i].files == files) {
771fe6b9
JG
1494 /* Already registered */
1495 return 0;
1496 }
1497 }
c245cb9e 1498
4d8bf9ae 1499 i = rdev->debugfs_count + 1;
c245cb9e
MW
1500 if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
1501 DRM_ERROR("Reached maximum number of debugfs components.\n");
1502 DRM_ERROR("Report so we increase "
1503 "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
771fe6b9
JG
1504 return -EINVAL;
1505 }
4d8bf9ae
CK
1506 rdev->debugfs[rdev->debugfs_count].files = files;
1507 rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
1508 rdev->debugfs_count = i;
771fe6b9
JG
1509#if defined(CONFIG_DEBUG_FS)
1510 drm_debugfs_create_files(files, nfiles,
1511 rdev->ddev->control->debugfs_root,
1512 rdev->ddev->control);
1513 drm_debugfs_create_files(files, nfiles,
1514 rdev->ddev->primary->debugfs_root,
1515 rdev->ddev->primary);
1516#endif
1517 return 0;
1518}
1519
/* Unregister every debugfs file set recorded in rdev->debugfs from
 * both the control and primary minors. */
static void radeon_debugfs_remove_files(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned idx;

	for (idx = 0; idx < rdev->debugfs_count; idx++) {
		drm_debugfs_remove_files(rdev->debugfs[idx].files,
					 rdev->debugfs[idx].num_files,
					 rdev->ddev->control);
		drm_debugfs_remove_files(rdev->debugfs[idx].files,
					 rdev->debugfs[idx].num_files,
					 rdev->ddev->primary);
	}
#endif
}
1535
#if defined(CONFIG_DEBUG_FS)
/* Per-minor debugfs init hook — nothing to do, files are added lazily. */
int radeon_debugfs_init(struct drm_minor *minor)
{
	return 0;
}

/* Per-minor debugfs cleanup hook — removal is handled in device_fini. */
void radeon_debugfs_cleanup(struct drm_minor *minor)
{
}
#endif
This page took 0.318958 seconds and 5 git commands to generate.