drivers/gpu/drm/radeon/radeon_device.c
771fe6b9
JG
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include <linux/console.h>
5a0e3ad6 29#include <linux/slab.h>
771fe6b9
JG
30#include <drm/drmP.h>
31#include <drm/drm_crtc_helper.h>
32#include <drm/radeon_drm.h>
28d52043 33#include <linux/vgaarb.h>
6a9ee8af 34#include <linux/vga_switcheroo.h>
bcc65fd8 35#include <linux/efi.h>
771fe6b9
JG
36#include "radeon_reg.h"
37#include "radeon.h"
771fe6b9
JG
38#include "atom.h"
39
1b5331d9
JG
40static const char radeon_family_name[][16] = {
41 "R100",
42 "RV100",
43 "RS100",
44 "RV200",
45 "RS200",
46 "R200",
47 "RV250",
48 "RS300",
49 "RV280",
50 "R300",
51 "R350",
52 "RV350",
53 "RV380",
54 "R420",
55 "R423",
56 "RV410",
57 "RS400",
58 "RS480",
59 "RS600",
60 "RS690",
61 "RS740",
62 "RV515",
63 "R520",
64 "RV530",
65 "RV560",
66 "RV570",
67 "R580",
68 "R600",
69 "RV610",
70 "RV630",
71 "RV670",
72 "RV620",
73 "RV635",
74 "RS780",
75 "RS880",
76 "RV770",
77 "RV730",
78 "RV710",
79 "RV740",
80 "CEDAR",
81 "REDWOOD",
82 "JUNIPER",
83 "CYPRESS",
84 "HEMLOCK",
b08ebe7e 85 "PALM",
4df64e65
AD
86 "SUMO",
87 "SUMO2",
1fe18305
AD
88 "BARTS",
89 "TURKS",
90 "CAICOS",
b7cfc9fe 91 "CAYMAN",
8848f759 92 "ARUBA",
cb28bb34
AD
93 "TAHITI",
94 "PITCAIRN",
95 "VERDE",
624d3524 96 "OLAND",
1b5331d9
JG
97 "LAST",
98};
99
0c195119
AD
100/**
101 * radeon_surface_init - Clear GPU surface registers.
102 *
103 * @rdev: radeon_device pointer
104 *
105 * Clear GPU surface registers (r1xx-r5xx).
b1e3a6d1 106 */
3ce0a23d 107void radeon_surface_init(struct radeon_device *rdev)
b1e3a6d1
MD
108{
109 /* FIXME: check this out */
110 if (rdev->family < CHIP_R600) {
111 int i;
112
550e2d92
DA
113 for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
114 if (rdev->surface_regs[i].bo)
115 radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
116 else
117 radeon_clear_surface_reg(rdev, i);
b1e3a6d1 118 }
e024e110
DA
119 /* enable surfaces */
120 WREG32(RADEON_SURFACE_CNTL, 0);
b1e3a6d1
MD
121 }
122}
123
771fe6b9
JG
124/*
125 * GPU scratch registers helpers function.
126 */
0c195119
AD
127/**
128 * radeon_scratch_init - Init scratch register driver information.
129 *
130 * @rdev: radeon_device pointer
131 *
132 * Init CP scratch register driver information (r1xx-r5xx)
133 */
3ce0a23d 134void radeon_scratch_init(struct radeon_device *rdev)
771fe6b9
JG
135{
136 int i;
137
138 /* FIXME: check this out */
139 if (rdev->family < CHIP_R300) {
140 rdev->scratch.num_reg = 5;
141 } else {
142 rdev->scratch.num_reg = 7;
143 }
724c80e1 144 rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
771fe6b9
JG
145 for (i = 0; i < rdev->scratch.num_reg; i++) {
146 rdev->scratch.free[i] = true;
724c80e1 147 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
771fe6b9
JG
148 }
149}
150
0c195119
AD
151/**
152 * radeon_scratch_get - Allocate a scratch register
153 *
154 * @rdev: radeon_device pointer
155 * @reg: scratch register mmio offset
156 *
157 * Allocate a CP scratch register for use by the driver (all asics).
158 * Returns 0 on success or -EINVAL on failure.
159 */
771fe6b9
JG
160int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
161{
162 int i;
163
164 for (i = 0; i < rdev->scratch.num_reg; i++) {
165 if (rdev->scratch.free[i]) {
166 rdev->scratch.free[i] = false;
167 *reg = rdev->scratch.reg[i];
168 return 0;
169 }
170 }
171 return -EINVAL;
172}
173
0c195119
AD
174/**
175 * radeon_scratch_free - Free a scratch register
176 *
177 * @rdev: radeon_device pointer
178 * @reg: scratch register mmio offset
179 *
180 * Free a CP scratch register allocated for use by the driver (all asics)
181 */
771fe6b9
JG
182void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
183{
184 int i;
185
186 for (i = 0; i < rdev->scratch.num_reg; i++) {
187 if (rdev->scratch.reg[i] == reg) {
188 rdev->scratch.free[i] = true;
189 return;
190 }
191 }
192}
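/*
 * Illustrative usage sketch (not part of this file; mirrors how the ring
 * tests elsewhere in the driver use these helpers): allocate a scratch
 * register, have the CP write a token to it, poll for it, then free it.
 *
 *	uint32_t scratch;
 *
 *	if (radeon_scratch_get(rdev, &scratch) == 0) {
 *		WREG32(scratch, 0xCAFEDEAD);
 *		... emit a packet that writes scratch, then poll RREG32(scratch) ...
 *		radeon_scratch_free(rdev, scratch);
 *	}
 */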
193
0c195119
AD
194/*
195 * radeon_wb_*()
 196 * Writeback is the method by which the GPU updates special pages
197 * in memory with the status of certain GPU events (fences, ring pointers,
198 * etc.).
199 */
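
/*
 * Hedged example of how writeback data is consumed (assumption based on the
 * ring code elsewhere in the driver, not on anything defined in this file):
 * when enabled, a ring's read pointer is fetched from the writeback page
 * instead of an MMIO read, roughly:
 *
 *	rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
 */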
200
201/**
202 * radeon_wb_disable - Disable Writeback
203 *
204 * @rdev: radeon_device pointer
205 *
206 * Disables Writeback (all asics). Used for suspend.
207 */
724c80e1
AD
208void radeon_wb_disable(struct radeon_device *rdev)
209{
210 int r;
211
212 if (rdev->wb.wb_obj) {
213 r = radeon_bo_reserve(rdev->wb.wb_obj, false);
214 if (unlikely(r != 0))
215 return;
216 radeon_bo_kunmap(rdev->wb.wb_obj);
217 radeon_bo_unpin(rdev->wb.wb_obj);
218 radeon_bo_unreserve(rdev->wb.wb_obj);
219 }
220 rdev->wb.enabled = false;
221}
222
0c195119
AD
223/**
224 * radeon_wb_fini - Disable Writeback and free memory
225 *
226 * @rdev: radeon_device pointer
227 *
228 * Disables Writeback and frees the Writeback memory (all asics).
229 * Used at driver shutdown.
230 */
724c80e1
AD
231void radeon_wb_fini(struct radeon_device *rdev)
232{
233 radeon_wb_disable(rdev);
234 if (rdev->wb.wb_obj) {
235 radeon_bo_unref(&rdev->wb.wb_obj);
236 rdev->wb.wb = NULL;
237 rdev->wb.wb_obj = NULL;
238 }
239}
240
0c195119
AD
241/**
 242 * radeon_wb_init - Init Writeback driver info and allocate memory
243 *
244 * @rdev: radeon_device pointer
245 *
 246 * Initializes writeback and allocates the writeback memory (all asics).
 247 * Used at driver startup.
 248 * Returns 0 on success or a negative error code on failure.
249 */
724c80e1
AD
250int radeon_wb_init(struct radeon_device *rdev)
251{
252 int r;
253
254 if (rdev->wb.wb_obj == NULL) {
441921d5 255 r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
40f5cf99 256 RADEON_GEM_DOMAIN_GTT, NULL, &rdev->wb.wb_obj);
724c80e1
AD
257 if (r) {
258 dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
259 return r;
260 }
261 }
262 r = radeon_bo_reserve(rdev->wb.wb_obj, false);
263 if (unlikely(r != 0)) {
264 radeon_wb_fini(rdev);
265 return r;
266 }
267 r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
268 &rdev->wb.gpu_addr);
269 if (r) {
270 radeon_bo_unreserve(rdev->wb.wb_obj);
271 dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
272 radeon_wb_fini(rdev);
273 return r;
274 }
275 r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
276 radeon_bo_unreserve(rdev->wb.wb_obj);
277 if (r) {
278 dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
279 radeon_wb_fini(rdev);
280 return r;
281 }
282
e6ba7599
AD
283 /* clear wb memory */
284 memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
d0f8a854
AD
285 /* disable event_write fences */
286 rdev->wb.use_event = false;
724c80e1 287 /* disabled via module param */
3b7a2b24 288 if (radeon_no_wb == 1) {
724c80e1 289 rdev->wb.enabled = false;
3b7a2b24 290 } else {
724c80e1 291 if (rdev->flags & RADEON_IS_AGP) {
28eebb70
AD
292 /* often unreliable on AGP */
293 rdev->wb.enabled = false;
294 } else if (rdev->family < CHIP_R300) {
295 /* often unreliable on pre-r300 */
724c80e1 296 rdev->wb.enabled = false;
d0f8a854 297 } else {
724c80e1 298 rdev->wb.enabled = true;
d0f8a854 299 /* event_write fences are only available on r600+ */
3b7a2b24 300 if (rdev->family >= CHIP_R600) {
d0f8a854 301 rdev->wb.use_event = true;
3b7a2b24 302 }
d0f8a854 303 }
724c80e1 304 }
c994ead6
AD
305 /* always use writeback/events on NI, APUs */
306 if (rdev->family >= CHIP_PALM) {
7d52785d
AD
307 rdev->wb.enabled = true;
308 rdev->wb.use_event = true;
309 }
724c80e1
AD
310
311 dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");
312
313 return 0;
314}
315
d594e46a
JG
316/**
317 * radeon_vram_location - try to find VRAM location
 318 * @rdev: radeon device structure holding all necessary information
 319 * @mc: memory controller structure holding memory information
320 * @base: base address at which to put VRAM
321 *
 322 * Function will try to place VRAM at the base address provided
 323 * as a parameter (which is so far either the PCI aperture address or,
 324 * for IGP, the TOM base address).
325 *
 326 * If there is not enough space to fit the invisible VRAM in the 32-bit
 327 * address space then we limit the VRAM size to the aperture.
328 *
 329 * If we are using AGP and if the AGP aperture doesn't allow us to have
 330 * room for all the VRAM then we restrict the VRAM to the PCI aperture
 331 * size and print a warning.
332 *
 333 * This function will never fail; the worst case is limiting VRAM.
 334 *
 335 * Note: GTT start, end, and size should be initialized before calling this
 336 * function on AGP platforms.
337 *
25985edc 338 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
d594e46a
JG
339 * this shouldn't be a problem as we are using the PCI aperture as a reference.
340 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
341 * not IGP.
342 *
 343 * Note: we use mc_vram_size as on some boards we need to program the mc to
 344 * cover the whole aperture even if VRAM size is smaller than the aperture size
 345 * (Novell bug 204882, along with lots of Ubuntu ones).
346 *
 347 * Note: when limiting vram it's safe to overwrite real_vram_size because
 348 * we are not in the case where real_vram_size is smaller than mc_vram_size
 349 * (i.e. not affected by the bogus hw of Novell bug 204882, along with lots
 350 * of Ubuntu ones).
351 *
 352 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 353 * explicitly check for that though.
354 *
355 * FIXME: when reducing VRAM size align new size on power of 2.
771fe6b9 356 */
d594e46a 357void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
771fe6b9 358{
1bcb04f7
CK
359 uint64_t limit = (uint64_t)radeon_vram_limit << 20;
360
d594e46a 361 mc->vram_start = base;
9ed8b1f9 362 if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
d594e46a
JG
363 dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
364 mc->real_vram_size = mc->aper_size;
365 mc->mc_vram_size = mc->aper_size;
366 }
367 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
2cbeb4ef 368 if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
d594e46a
JG
369 dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
370 mc->real_vram_size = mc->aper_size;
371 mc->mc_vram_size = mc->aper_size;
372 }
373 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
1bcb04f7
CK
374 if (limit && limit < mc->real_vram_size)
375 mc->real_vram_size = limit;
dd7cc55a 376 dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
d594e46a
JG
377 mc->mc_vram_size >> 20, mc->vram_start,
378 mc->vram_end, mc->real_vram_size >> 20);
379}
771fe6b9 380
d594e46a
JG
381/**
382 * radeon_gtt_location - try to find GTT location
 383 * @rdev: radeon device structure holding all necessary information
 384 * @mc: memory controller structure holding memory information
385 *
 386 * Function will try to place GTT before or after VRAM.
387 *
 388 * If GTT size is bigger than the space left then we adjust the GTT size.
 389 * Thus this function will never fail.
390 *
391 * FIXME: when reducing GTT size align new size on power of 2.
392 */
393void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
394{
395 u64 size_af, size_bf;
396
9ed8b1f9 397 size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
8d369bb1 398 size_bf = mc->vram_start & ~mc->gtt_base_align;
d594e46a
JG
399 if (size_bf > size_af) {
400 if (mc->gtt_size > size_bf) {
401 dev_warn(rdev->dev, "limiting GTT\n");
402 mc->gtt_size = size_bf;
771fe6b9 403 }
8d369bb1 404 mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
771fe6b9 405 } else {
d594e46a
JG
406 if (mc->gtt_size > size_af) {
407 dev_warn(rdev->dev, "limiting GTT\n");
408 mc->gtt_size = size_af;
409 }
8d369bb1 410 mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
771fe6b9 411 }
d594e46a 412 mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
dd7cc55a 413 dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
d594e46a 414 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
771fe6b9
JG
415}
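
/*
 * Worked example (illustrative numbers only): with 256M of VRAM placed at
 * base 0, radeon_vram_location() gives vram_start = 0x00000000 and
 * vram_end = 0x0FFFFFFF.  A 512M GTT then fits after VRAM (size_af is far
 * larger than size_bf), so radeon_gtt_location() yields
 * gtt_start = 0x10000000 and gtt_end = 0x2FFFFFFF, assuming the
 * gtt_base_align constraint is already satisfied.
 */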
416
771fe6b9
JG
417/*
418 * GPU helpers function.
419 */
0c195119
AD
420/**
421 * radeon_card_posted - check if the hw has already been initialized
422 *
423 * @rdev: radeon_device pointer
424 *
425 * Check if the asic has been initialized (all asics).
426 * Used at driver startup.
427 * Returns true if initialized or false if not.
428 */
9f022ddf 429bool radeon_card_posted(struct radeon_device *rdev)
771fe6b9
JG
430{
431 uint32_t reg;
432
83e68189
MF
433 if (efi_enabled(EFI_BOOT) &&
434 rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE)
bcc65fd8
MG
435 return false;
436
771fe6b9 437 /* first check CRTCs */
18007401
AD
438 if (ASIC_IS_DCE41(rdev)) {
439 reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
440 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
441 if (reg & EVERGREEN_CRTC_MASTER_EN)
442 return true;
443 } else if (ASIC_IS_DCE4(rdev)) {
bcc1c2a1
AD
444 reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
445 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
446 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
447 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
448 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
449 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
450 if (reg & EVERGREEN_CRTC_MASTER_EN)
451 return true;
452 } else if (ASIC_IS_AVIVO(rdev)) {
771fe6b9
JG
453 reg = RREG32(AVIVO_D1CRTC_CONTROL) |
454 RREG32(AVIVO_D2CRTC_CONTROL);
455 if (reg & AVIVO_CRTC_EN) {
456 return true;
457 }
458 } else {
459 reg = RREG32(RADEON_CRTC_GEN_CNTL) |
460 RREG32(RADEON_CRTC2_GEN_CNTL);
461 if (reg & RADEON_CRTC_EN) {
462 return true;
463 }
464 }
465
466 /* then check MEM_SIZE, in case the crtcs are off */
467 if (rdev->family >= CHIP_R600)
468 reg = RREG32(R600_CONFIG_MEMSIZE);
469 else
470 reg = RREG32(RADEON_CONFIG_MEMSIZE);
471
472 if (reg)
473 return true;
474
475 return false;
476
477}
478
0c195119
AD
479/**
480 * radeon_update_bandwidth_info - update display bandwidth params
481 *
482 * @rdev: radeon_device pointer
483 *
484 * Used when sclk/mclk are switched or display modes are set.
485 * params are used to calculate display watermarks (all asics)
486 */
f47299c5
AD
487void radeon_update_bandwidth_info(struct radeon_device *rdev)
488{
489 fixed20_12 a;
8807286e
AD
490 u32 sclk = rdev->pm.current_sclk;
491 u32 mclk = rdev->pm.current_mclk;
f47299c5 492
8807286e
AD
493 /* sclk/mclk in Mhz */
494 a.full = dfixed_const(100);
495 rdev->pm.sclk.full = dfixed_const(sclk);
496 rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
497 rdev->pm.mclk.full = dfixed_const(mclk);
498 rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
f47299c5 499
8807286e 500 if (rdev->flags & RADEON_IS_IGP) {
68adac5e 501 a.full = dfixed_const(16);
f47299c5 502 /* core_bandwidth = sclk(Mhz) * 16 */
68adac5e 503 rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
f47299c5
AD
504 }
505}
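
/*
 * Illustrative conversion (assuming current_sclk/current_mclk are reported in
 * 10 kHz units, as elsewhere in the pm code): current_sclk = 68000 becomes
 * dfixed(68000) / dfixed(100), i.e. 680 MHz held in 20.12 fixed point.
 */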
506
0c195119
AD
507/**
508 * radeon_boot_test_post_card - check and possibly initialize the hw
509 *
510 * @rdev: radeon_device pointer
511 *
512 * Check if the asic is initialized and if not, attempt to initialize
513 * it (all asics).
514 * Returns true if initialized or false if not.
515 */
72542d77
DA
516bool radeon_boot_test_post_card(struct radeon_device *rdev)
517{
518 if (radeon_card_posted(rdev))
519 return true;
520
521 if (rdev->bios) {
522 DRM_INFO("GPU not posted. posting now...\n");
523 if (rdev->is_atom_bios)
524 atom_asic_init(rdev->mode_info.atom_context);
525 else
526 radeon_combios_asic_init(rdev->ddev);
527 return true;
528 } else {
529 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
530 return false;
531 }
532}
533
0c195119
AD
534/**
535 * radeon_dummy_page_init - init dummy page used by the driver
536 *
537 * @rdev: radeon_device pointer
538 *
539 * Allocate the dummy page used by the driver (all asics).
540 * This dummy page is used by the driver as a filler for gart entries
541 * when pages are taken out of the GART
 542 * Returns 0 on success, -ENOMEM on failure.
543 */
3ce0a23d
JG
544int radeon_dummy_page_init(struct radeon_device *rdev)
545{
82568565
DA
546 if (rdev->dummy_page.page)
547 return 0;
3ce0a23d
JG
548 rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
549 if (rdev->dummy_page.page == NULL)
550 return -ENOMEM;
551 rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
552 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
a30f6fb7
BH
553 if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
554 dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
3ce0a23d
JG
555 __free_page(rdev->dummy_page.page);
556 rdev->dummy_page.page = NULL;
557 return -ENOMEM;
558 }
559 return 0;
560}
561
0c195119
AD
562/**
563 * radeon_dummy_page_fini - free dummy page used by the driver
564 *
565 * @rdev: radeon_device pointer
566 *
567 * Frees the dummy page used by the driver (all asics).
568 */
3ce0a23d
JG
569void radeon_dummy_page_fini(struct radeon_device *rdev)
570{
571 if (rdev->dummy_page.page == NULL)
572 return;
573 pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
574 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
575 __free_page(rdev->dummy_page.page);
576 rdev->dummy_page.page = NULL;
577}
578
771fe6b9 579
771fe6b9 580/* ATOM accessor methods */
0c195119
AD
581/*
582 * ATOM is an interpreted byte code stored in tables in the vbios. The
583 * driver registers callbacks to access registers and the interpreter
 584 * in the driver parses the tables and executes them to program specific
585 * actions (set display modes, asic init, etc.). See radeon_atombios.c,
586 * atombios.h, and atom.c
587 */
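
/*
 * For reference, a hedged sketch of how a command table is executed (based on
 * callers elsewhere in the driver; "args" is a hypothetical parameter struct):
 *
 *	int index = GetIndexIntoMasterTable(COMMAND, AsicInit);
 *	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 */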
588
589/**
590 * cail_pll_read - read PLL register
591 *
592 * @info: atom card_info pointer
593 * @reg: PLL register offset
594 *
595 * Provides a PLL register accessor for the atom interpreter (r4xx+).
596 * Returns the value of the PLL register.
597 */
771fe6b9
JG
598static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
599{
600 struct radeon_device *rdev = info->dev->dev_private;
601 uint32_t r;
602
603 r = rdev->pll_rreg(rdev, reg);
604 return r;
605}
606
0c195119
AD
607/**
608 * cail_pll_write - write PLL register
609 *
610 * @info: atom card_info pointer
611 * @reg: PLL register offset
612 * @val: value to write to the pll register
613 *
614 * Provides a PLL register accessor for the atom interpreter (r4xx+).
615 */
771fe6b9
JG
616static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
617{
618 struct radeon_device *rdev = info->dev->dev_private;
619
620 rdev->pll_wreg(rdev, reg, val);
621}
622
0c195119
AD
623/**
624 * cail_mc_read - read MC (Memory Controller) register
625 *
626 * @info: atom card_info pointer
627 * @reg: MC register offset
628 *
629 * Provides an MC register accessor for the atom interpreter (r4xx+).
630 * Returns the value of the MC register.
631 */
771fe6b9
JG
632static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
633{
634 struct radeon_device *rdev = info->dev->dev_private;
635 uint32_t r;
636
637 r = rdev->mc_rreg(rdev, reg);
638 return r;
639}
640
0c195119
AD
641/**
642 * cail_mc_write - write MC (Memory Controller) register
643 *
644 * @info: atom card_info pointer
645 * @reg: MC register offset
 646 * @val: value to write to the MC register
 647 *
 648 * Provides an MC register accessor for the atom interpreter (r4xx+).
649 */
771fe6b9
JG
650static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
651{
652 struct radeon_device *rdev = info->dev->dev_private;
653
654 rdev->mc_wreg(rdev, reg, val);
655}
656
0c195119
AD
657/**
658 * cail_reg_write - write MMIO register
659 *
660 * @info: atom card_info pointer
661 * @reg: MMIO register offset
 662 * @val: value to write to the MMIO register
 663 *
 664 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
665 */
771fe6b9
JG
666static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
667{
668 struct radeon_device *rdev = info->dev->dev_private;
669
670 WREG32(reg*4, val);
671}
672
0c195119
AD
673/**
674 * cail_reg_read - read MMIO register
675 *
676 * @info: atom card_info pointer
677 * @reg: MMIO register offset
678 *
679 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
680 * Returns the value of the MMIO register.
681 */
771fe6b9
JG
682static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
683{
684 struct radeon_device *rdev = info->dev->dev_private;
685 uint32_t r;
686
687 r = RREG32(reg*4);
688 return r;
689}
690
0c195119
AD
691/**
692 * cail_ioreg_write - write IO register
693 *
694 * @info: atom card_info pointer
695 * @reg: IO register offset
 696 * @val: value to write to the IO register
 697 *
 698 * Provides an IO register accessor for the atom interpreter (r4xx+).
699 */
351a52a2
AD
700static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
701{
702 struct radeon_device *rdev = info->dev->dev_private;
703
704 WREG32_IO(reg*4, val);
705}
706
0c195119
AD
707/**
708 * cail_ioreg_read - read IO register
709 *
710 * @info: atom card_info pointer
711 * @reg: IO register offset
712 *
713 * Provides an IO register accessor for the atom interpreter (r4xx+).
714 * Returns the value of the IO register.
715 */
351a52a2
AD
716static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
717{
718 struct radeon_device *rdev = info->dev->dev_private;
719 uint32_t r;
720
721 r = RREG32_IO(reg*4);
722 return r;
723}
724
0c195119
AD
725/**
726 * radeon_atombios_init - init the driver info and callbacks for atombios
727 *
728 * @rdev: radeon_device pointer
729 *
730 * Initializes the driver info and register access callbacks for the
731 * ATOM interpreter (r4xx+).
 732 * Returns 0 on success, -ENOMEM on failure.
733 * Called at driver startup.
734 */
771fe6b9
JG
735int radeon_atombios_init(struct radeon_device *rdev)
736{
61c4b24b
MF
737 struct card_info *atom_card_info =
738 kzalloc(sizeof(struct card_info), GFP_KERNEL);
739
740 if (!atom_card_info)
741 return -ENOMEM;
742
743 rdev->mode_info.atom_card_info = atom_card_info;
744 atom_card_info->dev = rdev->ddev;
745 atom_card_info->reg_read = cail_reg_read;
746 atom_card_info->reg_write = cail_reg_write;
351a52a2
AD
747 /* needed for iio ops */
748 if (rdev->rio_mem) {
749 atom_card_info->ioreg_read = cail_ioreg_read;
750 atom_card_info->ioreg_write = cail_ioreg_write;
751 } else {
752 DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
753 atom_card_info->ioreg_read = cail_reg_read;
754 atom_card_info->ioreg_write = cail_reg_write;
755 }
61c4b24b
MF
756 atom_card_info->mc_read = cail_mc_read;
757 atom_card_info->mc_write = cail_mc_write;
758 atom_card_info->pll_read = cail_pll_read;
759 atom_card_info->pll_write = cail_pll_write;
760
761 rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
0e34d094
TG
762 if (!rdev->mode_info.atom_context) {
763 radeon_atombios_fini(rdev);
764 return -ENOMEM;
765 }
766
c31ad97f 767 mutex_init(&rdev->mode_info.atom_context->mutex);
771fe6b9 768 radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
d904ef9b 769 atom_allocate_fb_scratch(rdev->mode_info.atom_context);
771fe6b9
JG
770 return 0;
771}
772
0c195119
AD
773/**
774 * radeon_atombios_fini - free the driver info and callbacks for atombios
775 *
776 * @rdev: radeon_device pointer
777 *
778 * Frees the driver info and register access callbacks for the ATOM
779 * interpreter (r4xx+).
780 * Called at driver shutdown.
781 */
771fe6b9
JG
782void radeon_atombios_fini(struct radeon_device *rdev)
783{
4a04a844
JG
784 if (rdev->mode_info.atom_context) {
785 kfree(rdev->mode_info.atom_context->scratch);
4a04a844 786 }
0e34d094
TG
787 kfree(rdev->mode_info.atom_context);
788 rdev->mode_info.atom_context = NULL;
61c4b24b 789 kfree(rdev->mode_info.atom_card_info);
0e34d094 790 rdev->mode_info.atom_card_info = NULL;
771fe6b9
JG
791}
792
0c195119
AD
793/* COMBIOS */
794/*
795 * COMBIOS is the bios format prior to ATOM. It provides
796 * command tables similar to ATOM, but doesn't have a unified
797 * parser. See radeon_combios.c
798 */
799
800/**
801 * radeon_combios_init - init the driver info for combios
802 *
803 * @rdev: radeon_device pointer
804 *
805 * Initializes the driver info for combios (r1xx-r3xx).
 806 * Returns 0 on success.
807 * Called at driver startup.
808 */
771fe6b9
JG
809int radeon_combios_init(struct radeon_device *rdev)
810{
811 radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
812 return 0;
813}
814
0c195119
AD
815/**
816 * radeon_combios_fini - free the driver info for combios
817 *
818 * @rdev: radeon_device pointer
819 *
820 * Frees the driver info for combios (r1xx-r3xx).
821 * Called at driver shutdown.
822 */
771fe6b9
JG
823void radeon_combios_fini(struct radeon_device *rdev)
824{
825}
826
0c195119
AD
827/* if we get transitioned to only one device, take VGA back */
828/**
829 * radeon_vga_set_decode - enable/disable vga decode
830 *
831 * @cookie: radeon_device pointer
832 * @state: enable/disable vga decode
833 *
834 * Enable/disable vga decode (all asics).
835 * Returns VGA resource flags.
836 */
28d52043
DA
837static unsigned int radeon_vga_set_decode(void *cookie, bool state)
838{
839 struct radeon_device *rdev = cookie;
28d52043
DA
840 radeon_vga_set_state(rdev, state);
841 if (state)
842 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
843 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
844 else
845 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
846}
c1176d6f 847
1bcb04f7
CK
848/**
849 * radeon_check_pot_argument - check that argument is a power of two
850 *
851 * @arg: value to check
852 *
853 * Validates that a certain argument is a power of two (all asics).
854 * Returns true if argument is valid.
855 */
856static bool radeon_check_pot_argument(int arg)
857{
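	/* a power of two has exactly one bit set, so clearing the lowest set
	 * bit with (arg & (arg - 1)) leaves zero: 512 & 511 == 0, whereas
	 * 96 & 95 == 64.  Note that 0 also passes this test.
	 */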
858 return (arg & (arg - 1)) == 0;
859}
860
0c195119
AD
861/**
862 * radeon_check_arguments - validate module params
863 *
864 * @rdev: radeon_device pointer
865 *
866 * Validates certain module parameters and updates
867 * the associated values used by the driver (all asics).
868 */
1109ca09 869static void radeon_check_arguments(struct radeon_device *rdev)
36421338
JG
870{
871 /* vramlimit must be a power of two */
1bcb04f7 872 if (!radeon_check_pot_argument(radeon_vram_limit)) {
36421338
JG
873 dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
874 radeon_vram_limit);
875 radeon_vram_limit = 0;
36421338 876 }
1bcb04f7 877
36421338 878 /* gtt size must be power of two and greater or equal to 32M */
1bcb04f7 879 if (radeon_gart_size < 32) {
36421338
JG
880 dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
881 radeon_gart_size);
882 radeon_gart_size = 512;
1bcb04f7
CK
883
884 } else if (!radeon_check_pot_argument(radeon_gart_size)) {
36421338
JG
885 dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
886 radeon_gart_size);
887 radeon_gart_size = 512;
36421338 888 }
1bcb04f7
CK
889 rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
890
36421338
JG
891 /* AGP mode can only be -1, 1, 2, 4, 8 */
892 switch (radeon_agpmode) {
893 case -1:
894 case 0:
895 case 1:
896 case 2:
897 case 4:
898 case 8:
899 break;
900 default:
901 dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
902 "-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
903 radeon_agpmode = 0;
904 break;
905 }
906}
907
d1f9809e
ML
908/**
909 * radeon_switcheroo_quirk_long_wakeup - return true if longer d3 delay is
910 * needed for waking up.
911 *
912 * @pdev: pci dev pointer
913 */
914static bool radeon_switcheroo_quirk_long_wakeup(struct pci_dev *pdev)
915{
916
917 /* 6600m in a macbook pro */
918 if (pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
919 pdev->subsystem_device == 0x00e2) {
920 printk(KERN_INFO "radeon: quirking longer d3 wakeup delay\n");
921 return true;
922 }
923
924 return false;
925}
926
0c195119
AD
927/**
928 * radeon_switcheroo_set_state - set switcheroo state
929 *
930 * @pdev: pci dev pointer
931 * @state: vga switcheroo state
932 *
 933 * Callback for the switcheroo driver. Suspends or resumes the
 934 * asic before or after it is powered up using ACPI methods.
935 */
6a9ee8af
DA
936static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
937{
938 struct drm_device *dev = pci_get_drvdata(pdev);
6a9ee8af
DA
939 pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
940 if (state == VGA_SWITCHEROO_ON) {
d1f9809e
ML
941 unsigned d3_delay = dev->pdev->d3_delay;
942
6a9ee8af
DA
943 printk(KERN_INFO "radeon: switched on\n");
944 /* don't suspend or resume card normally */
5bcf719b 945 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
d1f9809e
ML
946
947 if (d3_delay < 20 && radeon_switcheroo_quirk_long_wakeup(pdev))
948 dev->pdev->d3_delay = 20;
949
6a9ee8af 950 radeon_resume_kms(dev);
d1f9809e
ML
951
952 dev->pdev->d3_delay = d3_delay;
953
5bcf719b 954 dev->switch_power_state = DRM_SWITCH_POWER_ON;
fbf81762 955 drm_kms_helper_poll_enable(dev);
6a9ee8af
DA
956 } else {
957 printk(KERN_INFO "radeon: switched off\n");
fbf81762 958 drm_kms_helper_poll_disable(dev);
5bcf719b 959 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
6a9ee8af 960 radeon_suspend_kms(dev, pmm);
5bcf719b 961 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
6a9ee8af
DA
962 }
963}
964
0c195119
AD
965/**
966 * radeon_switcheroo_can_switch - see if switcheroo state can change
967 *
968 * @pdev: pci dev pointer
969 *
 970 * Callback for the switcheroo driver. Check if the switcheroo
 971 * state can be changed.
972 * Returns true if the state can be changed, false if not.
973 */
6a9ee8af
DA
974static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
975{
976 struct drm_device *dev = pci_get_drvdata(pdev);
977 bool can_switch;
978
979 spin_lock(&dev->count_lock);
980 can_switch = (dev->open_count == 0);
981 spin_unlock(&dev->count_lock);
982 return can_switch;
983}
984
26ec685f
TI
985static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
986 .set_gpu_state = radeon_switcheroo_set_state,
987 .reprobe = NULL,
988 .can_switch = radeon_switcheroo_can_switch,
989};
6a9ee8af 990
0c195119
AD
991/**
992 * radeon_device_init - initialize the driver
993 *
994 * @rdev: radeon_device pointer
 995 * @ddev: drm dev pointer
996 * @pdev: pci dev pointer
997 * @flags: driver flags
998 *
999 * Initializes the driver info and hw (all asics).
1000 * Returns 0 for success or an error on failure.
1001 * Called at driver startup.
1002 */
771fe6b9
JG
1003int radeon_device_init(struct radeon_device *rdev,
1004 struct drm_device *ddev,
1005 struct pci_dev *pdev,
1006 uint32_t flags)
1007{
351a52a2 1008 int r, i;
ad49f501 1009 int dma_bits;
771fe6b9 1010
771fe6b9 1011 rdev->shutdown = false;
9f022ddf 1012 rdev->dev = &pdev->dev;
771fe6b9
JG
1013 rdev->ddev = ddev;
1014 rdev->pdev = pdev;
1015 rdev->flags = flags;
1016 rdev->family = flags & RADEON_FAMILY_MASK;
1017 rdev->is_atom_bios = false;
1018 rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
1019 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
733289c2 1020 rdev->accel_working = false;
8b25ed34
AD
1021 /* set up ring ids */
1022 for (i = 0; i < RADEON_NUM_RINGS; i++) {
1023 rdev->ring[i].idx = i;
1024 }
1b5331d9 1025
d522d9cc
TR
1026 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
1027 radeon_family_name[rdev->family], pdev->vendor, pdev->device,
1028 pdev->subsystem_vendor, pdev->subsystem_device);
1b5331d9 1029
771fe6b9
JG
 1030 /* mutex initializations are all done here so we
 1031 * can recall functions without having locking issues */
d6999bc7 1032 mutex_init(&rdev->ring_lock);
40bacf16 1033 mutex_init(&rdev->dc_hw_i2c_mutex);
c20dc369 1034 atomic_set(&rdev->ih.lock, 0);
4c788679 1035 mutex_init(&rdev->gem.mutex);
c913e23a 1036 mutex_init(&rdev->pm.mutex);
6759a0a7 1037 mutex_init(&rdev->gpu_clock_mutex);
db7fce39 1038 init_rwsem(&rdev->pm.mclk_lock);
dee53e7f 1039 init_rwsem(&rdev->exclusive_lock);
73a6d3fc 1040 init_waitqueue_head(&rdev->irq.vblank_queue);
1b9c3dd0
AD
1041 r = radeon_gem_init(rdev);
1042 if (r)
1043 return r;
721604a1 1044 /* initialize vm here */
36ff39c4 1045 mutex_init(&rdev->vm_manager.lock);
23d4f1f2
AD
1046 /* Adjust VM size here.
1047 * Currently set to 4GB ((1 << 20) 4k pages).
1048 * Max GPUVM size for cayman and SI is 40 bits.
1049 */
721604a1
JG
1050 rdev->vm_manager.max_pfn = 1 << 20;
1051 INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);
771fe6b9 1052
4aac0473
JG
1053 /* Set asic functions */
1054 r = radeon_asic_init(rdev);
36421338 1055 if (r)
4aac0473 1056 return r;
36421338 1057 radeon_check_arguments(rdev);
4aac0473 1058
f95df9ca
AD
1059 /* all of the newer IGP chips have an internal gart
1060 * However some rs4xx report as AGP, so remove that here.
1061 */
1062 if ((rdev->family >= CHIP_RS400) &&
1063 (rdev->flags & RADEON_IS_IGP)) {
1064 rdev->flags &= ~RADEON_IS_AGP;
1065 }
1066
30256a3f 1067 if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
b574f251 1068 radeon_agp_disable(rdev);
771fe6b9
JG
1069 }
1070
9ed8b1f9
AD
1071 /* Set the internal MC address mask
1072 * This is the max address of the GPU's
1073 * internal address space.
1074 */
1075 if (rdev->family >= CHIP_CAYMAN)
1076 rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
1077 else if (rdev->family >= CHIP_CEDAR)
1078 rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
1079 else
1080 rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */
1081
ad49f501
DA
1082 /* set DMA mask + need_dma32 flags.
1083 * PCIE - can handle 40-bits.
005a83f1 1084 * IGP - can handle 40-bits
ad49f501 1085 * AGP - generally dma32 is safest
005a83f1 1086 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
ad49f501
DA
1087 */
1088 rdev->need_dma32 = false;
1089 if (rdev->flags & RADEON_IS_AGP)
1090 rdev->need_dma32 = true;
005a83f1 1091 if ((rdev->flags & RADEON_IS_PCI) &&
4a2b6662 1092 (rdev->family <= CHIP_RS740))
ad49f501
DA
1093 rdev->need_dma32 = true;
1094
1095 dma_bits = rdev->need_dma32 ? 32 : 40;
1096 r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
771fe6b9 1097 if (r) {
62fff811 1098 rdev->need_dma32 = true;
c52494f6 1099 dma_bits = 32;
771fe6b9
JG
1100 printk(KERN_WARNING "radeon: No suitable DMA available.\n");
1101 }
c52494f6
KRW
1102 r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
1103 if (r) {
1104 pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
1105 printk(KERN_WARNING "radeon: No coherent DMA available.\n");
1106 }
771fe6b9
JG
1107
1108 /* Registers mapping */
1109 /* TODO: block userspace mapping of io register */
2c385151 1110 spin_lock_init(&rdev->mmio_idx_lock);
01d73a69
JC
1111 rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
1112 rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
771fe6b9
JG
1113 rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
1114 if (rdev->rmmio == NULL) {
1115 return -ENOMEM;
1116 }
1117 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
1118 DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);
1119
351a52a2
AD
1120 /* io port mapping */
1121 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1122 if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
1123 rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
1124 rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
1125 break;
1126 }
1127 }
1128 if (rdev->rio_mem == NULL)
1129 DRM_ERROR("Unable to find PCI I/O BAR\n");
1130
28d52043 1131 /* if we have > 1 VGA cards, then disable the radeon VGA resources */
93239ea1
DA
1132 /* this will fail for cards that aren't VGA class devices, just
1133 * ignore it */
1134 vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
26ec685f 1135 vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops);
28d52043 1136
3ce0a23d 1137 r = radeon_init(rdev);
b574f251 1138 if (r)
3ce0a23d 1139 return r;
3ce0a23d 1140
04eb2206
CK
1141 r = radeon_ib_ring_tests(rdev);
1142 if (r)
1143 DRM_ERROR("ib ring test failed (%d).\n", r);
1144
b574f251
JG
1145 if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
 1146 /* Acceleration not working on AGP card, try again
1147 * with fallback to PCI or PCIE GART
1148 */
a2d07b74 1149 radeon_asic_reset(rdev);
b574f251
JG
1150 radeon_fini(rdev);
1151 radeon_agp_disable(rdev);
1152 r = radeon_init(rdev);
4aac0473
JG
1153 if (r)
1154 return r;
771fe6b9 1155 }
60a7e396 1156 if ((radeon_testing & 1)) {
ecc0b326
MD
1157 radeon_test_moves(rdev);
1158 }
60a7e396
CK
1159 if ((radeon_testing & 2)) {
1160 radeon_test_syncing(rdev);
1161 }
771fe6b9 1162 if (radeon_benchmarking) {
638dd7db 1163 radeon_benchmark(rdev, radeon_benchmarking);
771fe6b9 1164 }
6cf8a3f5 1165 return 0;
771fe6b9
JG
1166}
1167
4d8bf9ae
CK
1168static void radeon_debugfs_remove_files(struct radeon_device *rdev);
1169
0c195119
AD
1170/**
1171 * radeon_device_fini - tear down the driver
1172 *
1173 * @rdev: radeon_device pointer
1174 *
1175 * Tear down the driver info (all asics).
1176 * Called at driver shutdown.
1177 */
771fe6b9
JG
1178void radeon_device_fini(struct radeon_device *rdev)
1179{
771fe6b9
JG
1180 DRM_INFO("radeon: finishing device.\n");
1181 rdev->shutdown = true;
90aca4d2
JG
1182 /* evict vram memory */
1183 radeon_bo_evict_vram(rdev);
62a8ea3f 1184 radeon_fini(rdev);
6a9ee8af 1185 vga_switcheroo_unregister_client(rdev->pdev);
c1176d6f 1186 vga_client_register(rdev->pdev, NULL, NULL, NULL);
e0a2ca73
AD
1187 if (rdev->rio_mem)
1188 pci_iounmap(rdev->pdev, rdev->rio_mem);
351a52a2 1189 rdev->rio_mem = NULL;
771fe6b9
JG
1190 iounmap(rdev->rmmio);
1191 rdev->rmmio = NULL;
4d8bf9ae 1192 radeon_debugfs_remove_files(rdev);
771fe6b9
JG
1193}
1194
1195
1196/*
1197 * Suspend & resume.
1198 */
0c195119
AD
1199/**
1200 * radeon_suspend_kms - initiate device suspend
1201 *
 1202 * @dev: drm dev pointer
1203 * @state: suspend state
1204 *
1205 * Puts the hw in the suspend state (all asics).
1206 * Returns 0 for success or an error on failure.
1207 * Called at driver suspend.
1208 */
771fe6b9
JG
1209int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
1210{
875c1866 1211 struct radeon_device *rdev;
771fe6b9 1212 struct drm_crtc *crtc;
d8dcaa1d 1213 struct drm_connector *connector;
7465280c 1214 int i, r;
5f8f635e 1215 bool force_completion = false;
771fe6b9 1216
875c1866 1217 if (dev == NULL || dev->dev_private == NULL) {
771fe6b9
JG
1218 return -ENODEV;
1219 }
1220 if (state.event == PM_EVENT_PRETHAW) {
1221 return 0;
1222 }
875c1866
DJ
1223 rdev = dev->dev_private;
1224
5bcf719b 1225 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
6a9ee8af 1226 return 0;
d8dcaa1d 1227
86698c20
SF
1228 drm_kms_helper_poll_disable(dev);
1229
d8dcaa1d
AD
1230 /* turn off display hw */
1231 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1232 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
1233 }
1234
771fe6b9
JG
1235 /* unpin the front buffers */
1236 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1237 struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
4c788679 1238 struct radeon_bo *robj;
771fe6b9
JG
1239
1240 if (rfb == NULL || rfb->obj == NULL) {
1241 continue;
1242 }
7e4d15d9 1243 robj = gem_to_radeon_bo(rfb->obj);
38651674
DA
1244 /* don't unpin kernel fb objects */
1245 if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
4c788679 1246 r = radeon_bo_reserve(robj, false);
38651674 1247 if (r == 0) {
4c788679
JG
1248 radeon_bo_unpin(robj);
1249 radeon_bo_unreserve(robj);
1250 }
771fe6b9
JG
1251 }
1252 }
1253 /* evict vram memory */
4c788679 1254 radeon_bo_evict_vram(rdev);
8a47cc9e
CK
1255
1256 mutex_lock(&rdev->ring_lock);
771fe6b9 1257 /* wait for gpu to finish processing current batch */
5f8f635e
JG
1258 for (i = 0; i < RADEON_NUM_RINGS; i++) {
1259 r = radeon_fence_wait_empty_locked(rdev, i);
1260 if (r) {
1261 /* delay GPU reset to resume */
1262 force_completion = true;
1263 }
1264 }
1265 if (force_completion) {
1266 radeon_fence_driver_force_completion(rdev);
1267 }
8a47cc9e 1268 mutex_unlock(&rdev->ring_lock);
771fe6b9 1269
f657c2a7
YZ
1270 radeon_save_bios_scratch_regs(rdev);
1271
ce8f5370 1272 radeon_pm_suspend(rdev);
62a8ea3f 1273 radeon_suspend(rdev);
d4877cf2 1274 radeon_hpd_fini(rdev);
771fe6b9 1275 /* evict remaining vram memory */
4c788679 1276 radeon_bo_evict_vram(rdev);
771fe6b9 1277
10b06122
JG
1278 radeon_agp_suspend(rdev);
1279
771fe6b9
JG
1280 pci_save_state(dev->pdev);
1281 if (state.event == PM_EVENT_SUSPEND) {
1282 /* Shut down the device */
1283 pci_disable_device(dev->pdev);
1284 pci_set_power_state(dev->pdev, PCI_D3hot);
1285 }
ac751efa 1286 console_lock();
38651674 1287 radeon_fbdev_set_suspend(rdev, 1);
ac751efa 1288 console_unlock();
771fe6b9
JG
1289 return 0;
1290}
1291
0c195119
AD
1292/**
1293 * radeon_resume_kms - initiate device resume
1294 *
 1295 * @dev: drm dev pointer
1296 *
1297 * Bring the hw back to operating state (all asics).
1298 * Returns 0 for success or an error on failure.
1299 * Called at driver resume.
1300 */
771fe6b9
JG
1301int radeon_resume_kms(struct drm_device *dev)
1302{
09bdf591 1303 struct drm_connector *connector;
771fe6b9 1304 struct radeon_device *rdev = dev->dev_private;
04eb2206 1305 int r;
771fe6b9 1306
5bcf719b 1307 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
6a9ee8af
DA
1308 return 0;
1309
ac751efa 1310 console_lock();
771fe6b9
JG
1311 pci_set_power_state(dev->pdev, PCI_D0);
1312 pci_restore_state(dev->pdev);
1313 if (pci_enable_device(dev->pdev)) {
ac751efa 1314 console_unlock();
771fe6b9
JG
1315 return -1;
1316 }
0ebf1717
DA
1317 /* resume AGP if in use */
1318 radeon_agp_resume(rdev);
62a8ea3f 1319 radeon_resume(rdev);
04eb2206
CK
1320
1321 r = radeon_ib_ring_tests(rdev);
1322 if (r)
1323 DRM_ERROR("ib ring test failed (%d).\n", r);
1324
ce8f5370 1325 radeon_pm_resume(rdev);
f657c2a7 1326 radeon_restore_bios_scratch_regs(rdev);
09bdf591 1327
38651674 1328 radeon_fbdev_set_suspend(rdev, 0);
ac751efa 1329 console_unlock();
771fe6b9 1330
3fa47d9e
AD
1331 /* init dig PHYs, disp eng pll */
1332 if (rdev->is_atom_bios) {
ac89af1e 1333 radeon_atom_encoder_init(rdev);
f3f1f03e 1334 radeon_atom_disp_eng_pll_init(rdev);
bced76f2
AD
1335 /* turn on the BL */
1336 if (rdev->mode_info.bl_encoder) {
1337 u8 bl_level = radeon_get_backlight_level(rdev,
1338 rdev->mode_info.bl_encoder);
1339 radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
1340 bl_level);
1341 }
3fa47d9e 1342 }
d4877cf2
AD
1343 /* reset hpd state */
1344 radeon_hpd_init(rdev);
771fe6b9
JG
1345 /* blat the mode back in */
1346 drm_helper_resume_force_mode(dev);
a93f344d
AD
1347 /* turn on display hw */
1348 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1349 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
1350 }
86698c20
SF
1351
1352 drm_kms_helper_poll_enable(dev);
771fe6b9
JG
1353 return 0;
1354}
1355
0c195119
AD
1356/**
1357 * radeon_gpu_reset - reset the asic
1358 *
1359 * @rdev: radeon device pointer
1360 *
 1361 * Attempt to reset the GPU if it has hung (all asics).
1362 * Returns 0 for success or an error on failure.
1363 */
90aca4d2
JG
1364int radeon_gpu_reset(struct radeon_device *rdev)
1365{
55d7c221
CK
1366 unsigned ring_sizes[RADEON_NUM_RINGS];
1367 uint32_t *ring_data[RADEON_NUM_RINGS];
1368
1369 bool saved = false;
1370
1371 int i, r;
8fd1b84c 1372 int resched;
90aca4d2 1373
dee53e7f 1374 down_write(&rdev->exclusive_lock);
90aca4d2 1375 radeon_save_bios_scratch_regs(rdev);
8fd1b84c
DA
1376 /* block TTM */
1377 resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
90aca4d2
JG
1378 radeon_suspend(rdev);
1379
55d7c221
CK
1380 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1381 ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
1382 &ring_data[i]);
1383 if (ring_sizes[i]) {
1384 saved = true;
1385 dev_info(rdev->dev, "Saved %d dwords of commands "
1386 "on ring %d.\n", ring_sizes[i], i);
1387 }
1388 }
1389
1390retry:
90aca4d2
JG
1391 r = radeon_asic_reset(rdev);
1392 if (!r) {
55d7c221 1393 dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
90aca4d2 1394 radeon_resume(rdev);
55d7c221 1395 }
04eb2206 1396
55d7c221 1397 radeon_restore_bios_scratch_regs(rdev);
04eb2206 1398
55d7c221
CK
1399 if (!r) {
1400 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1401 radeon_ring_restore(rdev, &rdev->ring[i],
1402 ring_sizes[i], ring_data[i]);
f54b350d
CK
1403 ring_sizes[i] = 0;
1404 ring_data[i] = NULL;
55d7c221
CK
1405 }
1406
1407 r = radeon_ib_ring_tests(rdev);
1408 if (r) {
1409 dev_err(rdev->dev, "ib ring test failed (%d).\n", r);
1410 if (saved) {
f54b350d 1411 saved = false;
55d7c221
CK
1412 radeon_suspend(rdev);
1413 goto retry;
1414 }
1415 }
1416 } else {
76903b96 1417 radeon_fence_driver_force_completion(rdev);
55d7c221
CK
1418 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1419 kfree(ring_data[i]);
1420 }
90aca4d2 1421 }
7a1619b9 1422
d3493574
JG
1423 drm_helper_resume_force_mode(rdev->ddev);
1424
55d7c221 1425 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
7a1619b9
MD
1426 if (r) {
1427 /* bad news, how to tell it to userspace ? */
1428 dev_info(rdev->dev, "GPU reset failed\n");
1429 }
1430
dee53e7f 1431 up_write(&rdev->exclusive_lock);
90aca4d2
JG
1432 return r;
1433}
1434
771fe6b9
JG
1435
1436/*
1437 * Debugfs
1438 */
771fe6b9
JG
1439int radeon_debugfs_add_files(struct radeon_device *rdev,
1440 struct drm_info_list *files,
1441 unsigned nfiles)
1442{
1443 unsigned i;
1444
4d8bf9ae
CK
1445 for (i = 0; i < rdev->debugfs_count; i++) {
1446 if (rdev->debugfs[i].files == files) {
771fe6b9
JG
1447 /* Already registered */
1448 return 0;
1449 }
1450 }
c245cb9e 1451
4d8bf9ae 1452 i = rdev->debugfs_count + 1;
c245cb9e
MW
1453 if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
1454 DRM_ERROR("Reached maximum number of debugfs components.\n");
1455 DRM_ERROR("Report so we increase "
1456 "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
771fe6b9
JG
1457 return -EINVAL;
1458 }
4d8bf9ae
CK
1459 rdev->debugfs[rdev->debugfs_count].files = files;
1460 rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
1461 rdev->debugfs_count = i;
771fe6b9
JG
1462#if defined(CONFIG_DEBUG_FS)
1463 drm_debugfs_create_files(files, nfiles,
1464 rdev->ddev->control->debugfs_root,
1465 rdev->ddev->control);
1466 drm_debugfs_create_files(files, nfiles,
1467 rdev->ddev->primary->debugfs_root,
1468 rdev->ddev->primary);
1469#endif
1470 return 0;
1471}
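
/*
 * Registration sketch with hypothetical names (the foo_* identifiers are not
 * real components): a caller declares a static drm_info_list array and
 * registers it once, e.g.
 *
 *	static struct drm_info_list foo_info_list[] = {
 *		{ "radeon_foo_info", foo_debugfs_show, 0, NULL },
 *	};
 *
 *	radeon_debugfs_add_files(rdev, foo_info_list, ARRAY_SIZE(foo_info_list));
 */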
1472
4d8bf9ae
CK
1473static void radeon_debugfs_remove_files(struct radeon_device *rdev)
1474{
1475#if defined(CONFIG_DEBUG_FS)
1476 unsigned i;
1477
1478 for (i = 0; i < rdev->debugfs_count; i++) {
1479 drm_debugfs_remove_files(rdev->debugfs[i].files,
1480 rdev->debugfs[i].num_files,
1481 rdev->ddev->control);
1482 drm_debugfs_remove_files(rdev->debugfs[i].files,
1483 rdev->debugfs[i].num_files,
1484 rdev->ddev->primary);
1485 }
1486#endif
1487}
1488
771fe6b9
JG
1489#if defined(CONFIG_DEBUG_FS)
1490int radeon_debugfs_init(struct drm_minor *minor)
1491{
1492 return 0;
1493}
1494
1495void radeon_debugfs_cleanup(struct drm_minor *minor)
1496{
771fe6b9
JG
1497}
1498#endif