drivers/gpu/drm/radeon/radeon_device.c
/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/console.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

static const char radeon_family_name[][16] = {
	"R100",
	"RV100",
	"RS100",
	"RV200",
	"RS200",
	"R200",
	"RV250",
	"RS300",
	"RV280",
	"R300",
	"R350",
	"RV350",
	"RV380",
	"R420",
	"R423",
	"RV410",
	"RS400",
	"RS480",
	"RS600",
	"RS690",
	"RS740",
	"RV515",
	"R520",
	"RV530",
	"RV560",
	"RV570",
	"R580",
	"R600",
	"RV610",
	"RV630",
	"RV670",
	"RV620",
	"RV635",
	"RS780",
	"RS880",
	"RV770",
	"RV730",
	"RV710",
	"RV740",
	"CEDAR",
	"REDWOOD",
	"JUNIPER",
	"CYPRESS",
	"HEMLOCK",
	"PALM",
	"SUMO",
	"SUMO2",
	"BARTS",
	"TURKS",
	"CAICOS",
	"CAYMAN",
	"ARUBA",
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"LAST",
};

/**
 * radeon_surface_init - Clear GPU surface registers.
 *
 * @rdev: radeon_device pointer
 *
 * Clear GPU surface registers (r1xx-r5xx).
 */
void radeon_surface_init(struct radeon_device *rdev)
{
	/* FIXME: check this out */
	if (rdev->family < CHIP_R600) {
		int i;

		for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
			if (rdev->surface_regs[i].bo)
				radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
			else
				radeon_clear_surface_reg(rdev, i);
		}
		/* enable surfaces */
		WREG32(RADEON_SURFACE_CNTL, 0);
	}
}

/*
 * GPU scratch register helper functions.
 */
/**
 * radeon_scratch_init - Init scratch register driver information.
 *
 * @rdev: radeon_device pointer
 *
 * Init CP scratch register driver information (r1xx-r5xx)
 */
void radeon_scratch_init(struct radeon_device *rdev)
{
	int i;

	/* FIXME: check this out */
	if (rdev->family < CHIP_R300) {
		rdev->scratch.num_reg = 5;
	} else {
		rdev->scratch.num_reg = 7;
	}
	rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
	for (i = 0; i < rdev->scratch.num_reg; i++) {
		rdev->scratch.free[i] = true;
		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
	}
}

/**
 * radeon_scratch_get - Allocate a scratch register
 *
 * @rdev: radeon_device pointer
 * @reg: scratch register mmio offset
 *
 * Allocate a CP scratch register for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
{
	int i;

	for (i = 0; i < rdev->scratch.num_reg; i++) {
		if (rdev->scratch.free[i]) {
			rdev->scratch.free[i] = false;
			*reg = rdev->scratch.reg[i];
			return 0;
		}
	}
	return -EINVAL;
}

/**
 * radeon_scratch_free - Free a scratch register
 *
 * @rdev: radeon_device pointer
 * @reg: scratch register mmio offset
 *
 * Free a CP scratch register allocated for use by the driver (all asics)
 */
void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
{
	int i;

	for (i = 0; i < rdev->scratch.num_reg; i++) {
		if (rdev->scratch.reg[i] == reg) {
			rdev->scratch.free[i] = true;
			return;
		}
	}
}
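
/*
 * Usage sketch (illustrative only, not part of this file): a caller that
 * temporarily needs a CP scratch register pairs the two helpers above,
 * e.g. a ring test that has the CP write a known value back:
 *
 *	uint32_t scratch;
 *	int r = radeon_scratch_get(rdev, &scratch);
 *	if (r == 0) {
 *		WREG32(scratch, 0xCAFEDEAD);
 *		... emit a packet that writes scratch, wait, check it ...
 *		radeon_scratch_free(rdev, scratch);
 *	}
 */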

/*
 * radeon_wb_*()
 * Writeback is the method by which the GPU updates special pages
 * in memory with the status of certain GPU events (fences, ring pointers,
 * etc.).
 */

/**
 * radeon_wb_disable - Disable Writeback
 *
 * @rdev: radeon_device pointer
 *
 * Disables Writeback (all asics). Used for suspend.
 */
void radeon_wb_disable(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj) {
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0))
			return;
		radeon_bo_kunmap(rdev->wb.wb_obj);
		radeon_bo_unpin(rdev->wb.wb_obj);
		radeon_bo_unreserve(rdev->wb.wb_obj);
	}
	rdev->wb.enabled = false;
}

/**
 * radeon_wb_fini - Disable Writeback and free memory
 *
 * @rdev: radeon_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
void radeon_wb_fini(struct radeon_device *rdev)
{
	radeon_wb_disable(rdev);
	if (rdev->wb.wb_obj) {
		radeon_bo_unref(&rdev->wb.wb_obj);
		rdev->wb.wb = NULL;
		rdev->wb.wb_obj = NULL;
	}
}

/**
 * radeon_wb_init - Init Writeback driver info and allocate memory
 *
 * @rdev: radeon_device pointer
 *
 * Initializes writeback and allocates the Writeback memory (all asics).
 * Used at driver startup.
 * Returns 0 on success or a negative error code on failure.
 */
int radeon_wb_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj == NULL) {
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, NULL, &rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}
	}
	r = radeon_bo_reserve(rdev->wb.wb_obj, false);
	if (unlikely(r != 0)) {
		radeon_wb_fini(rdev);
		return r;
	}
	r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
			  &rdev->wb.gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->wb.wb_obj);
		dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
		radeon_wb_fini(rdev);
		return r;
	}
	r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
	radeon_bo_unreserve(rdev->wb.wb_obj);
	if (r) {
		dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
		radeon_wb_fini(rdev);
		return r;
	}

	/* clear wb memory */
	memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
	/* disable event_write fences */
	rdev->wb.use_event = false;
	/* disabled via module param */
	if (radeon_no_wb == 1) {
		rdev->wb.enabled = false;
	} else {
		if (rdev->flags & RADEON_IS_AGP) {
			/* often unreliable on AGP */
			rdev->wb.enabled = false;
		} else if (rdev->family < CHIP_R300) {
			/* often unreliable on pre-r300 */
			rdev->wb.enabled = false;
		} else {
			rdev->wb.enabled = true;
			/* event_write fences are only available on r600+ */
			if (rdev->family >= CHIP_R600) {
				rdev->wb.use_event = true;
			}
		}
	}
	/* always use writeback/events on NI, APUs */
	if (rdev->family >= CHIP_PALM) {
		rdev->wb.enabled = true;
		rdev->wb.use_event = true;
	}

	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");

	return 0;
}
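
/*
 * Consumption sketch (illustrative only): once writeback is enabled, other
 * parts of the driver read status the GPU has written into the writeback
 * page instead of doing a slow MMIO read, roughly:
 *
 *	if (rdev->wb.enabled)
 *		rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs / 4]);
 *	else
 *		rptr = RREG32(ring->rptr_reg);
 *
 * (field names as used by the ring code elsewhere in the driver; shown here
 * only to illustrate why rdev->wb.wb is kmapped and kept pinned in GTT)
 */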

/**
 * radeon_vram_location - try to find VRAM location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as parameter (which is so far either the PCI aperture address or,
 * for IGP, the TOM base address).
 *
 * If there is not enough space to fit the invisible VRAM in the 32bits
 * address space then we limit the VRAM size to the aperture.
 *
 * If we are using AGP and if the AGP aperture doesn't allow us to have
 * room for all the VRAM then we restrict the VRAM to the PCI aperture
 * size and print a warning.
 *
 * This function will never fail, worst case is limiting VRAM.
 *
 * Note: GTT start, end, size should be initialized before calling this
 * function on AGP platform.
 *
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size as on some boards we need to program the mc to
 * cover the whole aperture even if VRAM size is inferior to aperture size
 * (Novell bug 204882 along with lots of ubuntu ones).
 *
 * Note: when limiting vram it's safe to overwrite real_vram_size because
 * we are not in the case where real_vram_size is inferior to mc_vram_size (ie
 * not affected by the bogus hw of Novell bug 204882 along with lots of ubuntu
 * ones).
 *
 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
	uint64_t limit = (uint64_t)radeon_vram_limit << 20;

	mc->vram_start = base;
	if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
			mc->mc_vram_size >> 20, mc->vram_start,
			mc->vram_end, mc->real_vram_size >> 20);
}

/**
 * radeon_gtt_location - try to find GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If GTT size is bigger than space left then we adjust GTT size.
 * Thus this function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_af, size_bf;

	size_af = ((0xFFFFFFFF - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		if (mc->gtt_size > size_bf) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
	} else {
		if (mc->gtt_size > size_af) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}
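
/*
 * Worked example (assumed numbers, illustration only): with 1GB of VRAM
 * placed at 0 (vram_start = 0x0, vram_end = 0x3FFFFFFF) and
 * gtt_base_align = 0, the space before VRAM is size_bf = 0 and the space
 * after it is size_af = 0xFFFFFFFF - 0x3FFFFFFF = 0xC0000000 (3GB), so a
 * default 512MB GTT is placed right after VRAM at gtt_start = 0x40000000
 * and gtt_end = 0x5FFFFFFF.
 */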

/*
 * GPU helper functions.
 */
/**
 * radeon_card_posted - check if the hw has already been initialized
 *
 * @rdev: radeon_device pointer
 *
 * Check if the asic has been initialized (all asics).
 * Used at driver startup.
 * Returns true if initialized or false if not.
 */
bool radeon_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	if (efi_enabled && rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE)
		return false;

	/* first check CRTCs */
	if (ASIC_IS_DCE41(rdev)) {
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_DCE4(rdev)) {
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_AVIVO(rdev)) {
		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
			RREG32(AVIVO_D2CRTC_CONTROL);
		if (reg & AVIVO_CRTC_EN) {
			return true;
		}
	} else {
		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
			RREG32(RADEON_CRTC2_GEN_CNTL);
		if (reg & RADEON_CRTC_EN) {
			return true;
		}
	}

	/* then check MEM_SIZE, in case the crtcs are off */
	if (rdev->family >= CHIP_R600)
		reg = RREG32(R600_CONFIG_MEMSIZE);
	else
		reg = RREG32(RADEON_CONFIG_MEMSIZE);

	if (reg)
		return true;

	return false;
}

/**
 * radeon_update_bandwidth_info - update display bandwidth params
 *
 * @rdev: radeon_device pointer
 *
 * Used when sclk/mclk are switched or display modes are set.
 * The params are used to calculate display watermarks (all asics).
 */
void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
	fixed20_12 a;
	u32 sclk = rdev->pm.current_sclk;
	u32 mclk = rdev->pm.current_mclk;

	/* sclk/mclk in Mhz */
	a.full = dfixed_const(100);
	rdev->pm.sclk.full = dfixed_const(sclk);
	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
	rdev->pm.mclk.full = dfixed_const(mclk);
	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);

	if (rdev->flags & RADEON_IS_IGP) {
		a.full = dfixed_const(16);
		/* core_bandwidth = sclk(Mhz) * 16 */
		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
	}
}

/**
 * radeon_boot_test_post_card - check and possibly initialize the hw
 *
 * @rdev: radeon_device pointer
 *
 * Check if the asic is initialized and if not, attempt to initialize
 * it (all asics).
 * Returns true if initialized or false if not.
 */
bool radeon_boot_test_post_card(struct radeon_device *rdev)
{
	if (radeon_card_posted(rdev))
		return true;

	if (rdev->bios) {
		DRM_INFO("GPU not posted. posting now...\n");
		if (rdev->is_atom_bios)
			atom_asic_init(rdev->mode_info.atom_context);
		else
			radeon_combios_asic_init(rdev->ddev);
		return true;
	} else {
		dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
		return false;
	}
}

/**
 * radeon_dummy_page_init - init dummy page used by the driver
 *
 * @rdev: radeon_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for gart entries
 * when pages are taken out of the GART.
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_dummy_page_init(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page)
		return 0;
	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (rdev->dummy_page.page == NULL)
		return -ENOMEM;
	rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
					     0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
		dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(rdev->dummy_page.page);
		rdev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	return 0;
}

/**
 * radeon_dummy_page_fini - free dummy page used by the driver
 *
 * @rdev: radeon_device pointer
 *
 * Frees the dummy page used by the driver (all asics).
 */
void radeon_dummy_page_fini(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page == NULL)
		return;
	pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(rdev->dummy_page.page);
	rdev->dummy_page.page = NULL;
}


/* ATOM accessor methods */
/*
 * ATOM is an interpreted byte code stored in tables in the vbios. The
 * driver registers callbacks to access registers and the interpreter
 * in the driver parses the tables and executes them to program specific
 * actions (set display modes, asic init, etc.). See radeon_atombios.c,
 * atombios.h, and atom.c
 */

/**
 * cail_pll_read - read PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 * Returns the value of the PLL register.
 */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = rdev->pll_rreg(rdev, reg);
	return r;
}

/**
 * cail_pll_write - write PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 * @val: value to write to the pll register
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->pll_wreg(rdev, reg, val);
}

/**
 * cail_mc_read - read MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MC register.
 */
static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = rdev->mc_rreg(rdev, reg);
	return r;
}

/**
 * cail_mc_write - write MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 * @val: value to write to the MC register
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->mc_wreg(rdev, reg, val);
}

/**
 * cail_reg_write - write MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 * @val: value to write to the MMIO register
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32(reg*4, val);
}

/**
 * cail_reg_read - read MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MMIO register.
 */
static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = RREG32(reg*4);
	return r;
}

/**
 * cail_ioreg_write - write IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 * @val: value to write to the IO register
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 */
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32_IO(reg*4, val);
}

/**
 * cail_ioreg_read - read IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset
 *
 * Provides an IO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the IO register.
 */
static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = RREG32_IO(reg*4);
	return r;
}

/**
 * radeon_atombios_init - init the driver info and callbacks for atombios
 *
 * @rdev: radeon_device pointer
 *
 * Initializes the driver info and register access callbacks for the
 * ATOM interpreter (r4xx+).
 * Returns 0 on success, -ENOMEM on failure.
 * Called at driver startup.
 */
int radeon_atombios_init(struct radeon_device *rdev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	rdev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = rdev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops */
	if (rdev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
	mutex_init(&rdev->mode_info.atom_context->mutex);
	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
	return 0;
}

/**
 * radeon_atombios_fini - free the driver info and callbacks for atombios
 *
 * @rdev: radeon_device pointer
 *
 * Frees the driver info and register access callbacks for the ATOM
 * interpreter (r4xx+).
 * Called at driver shutdown.
 */
void radeon_atombios_fini(struct radeon_device *rdev)
{
	if (rdev->mode_info.atom_context) {
		kfree(rdev->mode_info.atom_context->scratch);
		kfree(rdev->mode_info.atom_context);
	}
	kfree(rdev->mode_info.atom_card_info);
}

/* COMBIOS */
/*
 * COMBIOS is the bios format prior to ATOM. It provides
 * command tables similar to ATOM, but doesn't have a unified
 * parser. See radeon_combios.c
 */

/**
 * radeon_combios_init - init the driver info for combios
 *
 * @rdev: radeon_device pointer
 *
 * Initializes the driver info for combios (r1xx-r3xx).
 * Returns 0 on success.
 * Called at driver startup.
 */
int radeon_combios_init(struct radeon_device *rdev)
{
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
	return 0;
}

/**
 * radeon_combios_fini - free the driver info for combios
 *
 * @rdev: radeon_device pointer
 *
 * Frees the driver info for combios (r1xx-r3xx).
 * Called at driver shutdown.
 */
void radeon_combios_fini(struct radeon_device *rdev)
{
}

/* if we get transitioned to only one device, take VGA back */
/**
 * radeon_vga_set_decode - enable/disable vga decode
 *
 * @cookie: radeon_device pointer
 * @state: enable/disable vga decode
 *
 * Enable/disable vga decode (all asics).
 * Returns VGA resource flags.
 */
static unsigned int radeon_vga_set_decode(void *cookie, bool state)
{
	struct radeon_device *rdev = cookie;
	radeon_vga_set_state(rdev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

/**
 * radeon_check_pot_argument - check that argument is a power of two
 *
 * @arg: value to check
 *
 * Validates that a certain argument is a power of two (all asics).
 * Returns true if argument is valid.
 */
static bool radeon_check_pot_argument(int arg)
{
	return (arg & (arg - 1)) == 0;
}
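
/*
 * Example: 8 is 0b1000 and 7 is 0b0111, so 8 & 7 == 0 and the check passes;
 * 6 is 0b0110 and 5 is 0b0101, so 6 & 5 == 0b0100 != 0 and the check fails.
 * Note that 0 also passes, which suits the "unlimited"/default values
 * validated below.
 */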

/**
 * radeon_check_arguments - validate module params
 *
 * @rdev: radeon_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 */
static void radeon_check_arguments(struct radeon_device *rdev)
{
	/* vramlimit must be a power of two */
	if (!radeon_check_pot_argument(radeon_vram_limit)) {
		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
			 radeon_vram_limit);
		radeon_vram_limit = 0;
	}

	/* gtt size must be power of two and greater or equal to 32M */
	if (radeon_gart_size < 32) {
		dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
			 radeon_gart_size);
		radeon_gart_size = 512;

	} else if (!radeon_check_pot_argument(radeon_gart_size)) {
		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
			 radeon_gart_size);
		radeon_gart_size = 512;
	}
	rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;

	/* AGP mode can only be -1, 1, 2, 4, 8 */
	switch (radeon_agpmode) {
	case -1:
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
			 "-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
		radeon_agpmode = 0;
		break;
	}
}

/**
 * radeon_switcheroo_quirk_long_wakeup - return true if longer d3 delay is
 * needed for waking up.
 *
 * @pdev: pci dev pointer
 */
static bool radeon_switcheroo_quirk_long_wakeup(struct pci_dev *pdev)
{
	/* 6600m in a macbook pro */
	if (pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
	    pdev->subsystem_device == 0x00e2) {
		printk(KERN_INFO "radeon: quirking longer d3 wakeup delay\n");
		return true;
	}

	return false;
}

/**
 * radeon_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes the
 * asics before or after it is powered up using ACPI methods.
 */
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
	if (state == VGA_SWITCHEROO_ON) {
		unsigned d3_delay = dev->pdev->d3_delay;

		printk(KERN_INFO "radeon: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		if (d3_delay < 20 && radeon_switcheroo_quirk_long_wakeup(pdev))
			dev->pdev->d3_delay = 20;

		radeon_resume_kms(dev);

		dev->pdev->d3_delay = d3_delay;

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		printk(KERN_INFO "radeon: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		radeon_suspend_kms(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

/**
 * radeon_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver. Check if the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	bool can_switch;

	spin_lock(&dev->count_lock);
	can_switch = (dev->open_count == 0);
	spin_unlock(&dev->count_lock);
	return can_switch;
}

static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
	.set_gpu_state = radeon_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = radeon_switcheroo_can_switch,
};

/**
 * radeon_device_init - initialize the driver
 *
 * @rdev: radeon_device pointer
 * @ddev: drm dev pointer
 * @pdev: pci dev pointer
 * @flags: driver flags
 *
 * Initializes the driver info and hw (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver startup.
 */
int radeon_device_init(struct radeon_device *rdev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r, i;
	int dma_bits;

	rdev->shutdown = false;
	rdev->dev = &pdev->dev;
	rdev->ddev = ddev;
	rdev->pdev = pdev;
	rdev->flags = flags;
	rdev->family = flags & RADEON_FAMILY_MASK;
	rdev->is_atom_bios = false;
	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
	rdev->accel_working = false;
	/* set up ring ids */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		rdev->ring[i].idx = i;
	}

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
		radeon_family_name[rdev->family], pdev->vendor, pdev->device,
		pdev->subsystem_vendor, pdev->subsystem_device);

	/* mutex initialization are all done here so we
	 * can recall function without having locking issues */
	mutex_init(&rdev->ring_lock);
	mutex_init(&rdev->dc_hw_i2c_mutex);
	atomic_set(&rdev->ih.lock, 0);
	mutex_init(&rdev->gem.mutex);
	mutex_init(&rdev->pm.mutex);
	mutex_init(&rdev->gpu_clock_mutex);
	init_rwsem(&rdev->pm.mclk_lock);
	init_rwsem(&rdev->exclusive_lock);
	init_waitqueue_head(&rdev->irq.vblank_queue);
	r = radeon_gem_init(rdev);
	if (r)
		return r;
	/* initialize vm here */
	mutex_init(&rdev->vm_manager.lock);
	/* Adjust VM size here.
	 * Currently set to 4GB ((1 << 20) 4k pages).
	 * Max GPUVM size for cayman and SI is 40 bits.
	 */
	rdev->vm_manager.max_pfn = 1 << 20;
	INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);

	/* Set asic functions */
	r = radeon_asic_init(rdev);
	if (r)
		return r;
	radeon_check_arguments(rdev);

	/* all of the newer IGP chips have an internal gart
	 * However some rs4xx report as AGP, so remove that here.
	 */
	if ((rdev->family >= CHIP_RS400) &&
	    (rdev->flags & RADEON_IS_IGP)) {
		rdev->flags &= ~RADEON_IS_AGP;
	}

	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
		radeon_agp_disable(rdev);
	}

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits
	 * AGP - generally dma32 is safest
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
	 */
	rdev->need_dma32 = false;
	if (rdev->flags & RADEON_IS_AGP)
		rdev->need_dma32 = true;
	if ((rdev->flags & RADEON_IS_PCI) &&
	    (rdev->family <= CHIP_RS740))
		rdev->need_dma32 = true;

	dma_bits = rdev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		rdev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "radeon: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "radeon: No coherent DMA available.\n");
	}

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
	spin_lock_init(&rdev->mmio_idx_lock);
	rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
	rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
	if (rdev->rmmio == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
	DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);

	/* io port mapping */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
			break;
		}
	}
	if (rdev->rio_mem == NULL)
		DRM_ERROR("Unable to find PCI I/O BAR\n");

	/* if we have > 1 VGA cards, then disable the radeon VGA resources */
	/* this will fail for cards that aren't VGA class devices, just
	 * ignore it */
	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
	vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops);

	r = radeon_init(rdev);
	if (r)
		return r;

	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
		/* Acceleration not working on AGP card try again
		 * with fallback to PCI or PCIE GART
		 */
		radeon_asic_reset(rdev);
		radeon_fini(rdev);
		radeon_agp_disable(rdev);
		r = radeon_init(rdev);
		if (r)
			return r;
	}
	if ((radeon_testing & 1)) {
		radeon_test_moves(rdev);
	}
	if ((radeon_testing & 2)) {
		radeon_test_syncing(rdev);
	}
	if (radeon_benchmarking) {
		radeon_benchmark(rdev, radeon_benchmarking);
	}
	return 0;
}

static void radeon_debugfs_remove_files(struct radeon_device *rdev);

/**
 * radeon_device_fini - tear down the driver
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the driver info (all asics).
 * Called at driver shutdown.
 */
void radeon_device_fini(struct radeon_device *rdev)
{
	DRM_INFO("radeon: finishing device.\n");
	rdev->shutdown = true;
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);
	radeon_fini(rdev);
	vga_switcheroo_unregister_client(rdev->pdev);
	vga_client_register(rdev->pdev, NULL, NULL, NULL);
	if (rdev->rio_mem)
		pci_iounmap(rdev->pdev, rdev->rio_mem);
	rdev->rio_mem = NULL;
	iounmap(rdev->rmmio);
	rdev->rmmio = NULL;
	radeon_debugfs_remove_files(rdev);
}


/*
 * Suspend & resume.
 */
/**
 * radeon_suspend_kms - initiate device suspend
 *
 * @dev: drm dev pointer
 * @state: suspend state
 *
 * Puts the hw in the suspend state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */
int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
{
	struct radeon_device *rdev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	int i, r;
	bool force_completion = false;

	if (dev == NULL || dev->dev_private == NULL) {
		return -ENODEV;
	}
	if (state.event == PM_EVENT_PRETHAW) {
		return 0;
	}
	rdev = dev->dev_private;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	drm_kms_helper_poll_disable(dev);

	/* turn off display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
	}

	/* unpin the front buffers */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
		struct radeon_bo *robj;

		if (rfb == NULL || rfb->obj == NULL) {
			continue;
		}
		robj = gem_to_radeon_bo(rfb->obj);
		/* don't unpin kernel fb objects */
		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				radeon_bo_unpin(robj);
				radeon_bo_unreserve(robj);
			}
		}
	}
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);

	mutex_lock(&rdev->ring_lock);
	/* wait for gpu to finish processing current batch */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		r = radeon_fence_wait_empty_locked(rdev, i);
		if (r) {
			/* delay GPU reset to resume */
			force_completion = true;
		}
	}
	if (force_completion) {
		radeon_fence_driver_force_completion(rdev);
	}
	mutex_unlock(&rdev->ring_lock);

	radeon_save_bios_scratch_regs(rdev);

	radeon_pm_suspend(rdev);
	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);
	/* evict remaining vram memory */
	radeon_bo_evict_vram(rdev);

	radeon_agp_suspend(rdev);

	pci_save_state(dev->pdev);
	if (state.event == PM_EVENT_SUSPEND) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}
	console_lock();
	radeon_fbdev_set_suspend(rdev, 1);
	console_unlock();
	return 0;
}

/**
 * radeon_resume_kms - initiate device resume
 *
 * @dev: drm dev pointer
 *
 * Bring the hw back to operating state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver resume.
 */
int radeon_resume_kms(struct drm_device *dev)
{
	struct drm_connector *connector;
	struct radeon_device *rdev = dev->dev_private;
	int r;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	console_lock();
	pci_set_power_state(dev->pdev, PCI_D0);
	pci_restore_state(dev->pdev);
	if (pci_enable_device(dev->pdev)) {
		console_unlock();
		return -1;
	}
	/* resume AGP if in use */
	radeon_agp_resume(rdev);
	radeon_resume(rdev);

	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	radeon_pm_resume(rdev);
	radeon_restore_bios_scratch_regs(rdev);

	radeon_fbdev_set_suspend(rdev, 0);
	console_unlock();

	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);
		/* turn on the BL */
		if (rdev->mode_info.bl_encoder) {
			u8 bl_level = radeon_get_backlight_level(rdev,
								 rdev->mode_info.bl_encoder);
			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
						   bl_level);
		}
	}
	/* reset hpd state */
	radeon_hpd_init(rdev);
	/* blat the mode back in */
	drm_helper_resume_force_mode(dev);
	/* turn on display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
	}

	drm_kms_helper_poll_enable(dev);
	return 0;
}

/**
 * radeon_gpu_reset - reset the asic
 *
 * @rdev: radeon device pointer
 *
 * Attempt to reset the GPU if it has hung (all asics).
 * Returns 0 for success or an error on failure.
 */
int radeon_gpu_reset(struct radeon_device *rdev)
{
	unsigned ring_sizes[RADEON_NUM_RINGS];
	uint32_t *ring_data[RADEON_NUM_RINGS];

	bool saved = false;

	int i, r;
	int resched;

	down_write(&rdev->exclusive_lock);
	radeon_save_bios_scratch_regs(rdev);
	/* block TTM */
	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
	radeon_suspend(rdev);

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
						   &ring_data[i]);
		if (ring_sizes[i]) {
			saved = true;
			dev_info(rdev->dev, "Saved %d dwords of commands "
				 "on ring %d.\n", ring_sizes[i], i);
		}
	}

retry:
	r = radeon_asic_reset(rdev);
	if (!r) {
		dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
		radeon_resume(rdev);
	}

	radeon_restore_bios_scratch_regs(rdev);

	if (!r) {
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			radeon_ring_restore(rdev, &rdev->ring[i],
					    ring_sizes[i], ring_data[i]);
			ring_sizes[i] = 0;
			ring_data[i] = NULL;
		}

		r = radeon_ib_ring_tests(rdev);
		if (r) {
			dev_err(rdev->dev, "ib ring test failed (%d).\n", r);
			if (saved) {
				saved = false;
				radeon_suspend(rdev);
				goto retry;
			}
		}
	} else {
		radeon_fence_driver_force_completion(rdev);
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			kfree(ring_data[i]);
		}
	}

	drm_helper_resume_force_mode(rdev->ddev);

	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
	if (r) {
		/* bad news, how to tell it to userspace ? */
		dev_info(rdev->dev, "GPU reset failed\n");
	}

	up_write(&rdev->exclusive_lock);
	return r;
}


/*
 * Debugfs
 */
int radeon_debugfs_add_files(struct radeon_device *rdev,
			     struct drm_info_list *files,
			     unsigned nfiles)
{
	unsigned i;

	for (i = 0; i < rdev->debugfs_count; i++) {
		if (rdev->debugfs[i].files == files) {
			/* Already registered */
			return 0;
		}
	}

	i = rdev->debugfs_count + 1;
	if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
		DRM_ERROR("Reached maximum number of debugfs components.\n");
		DRM_ERROR("Report so we increase "
			  "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
		return -EINVAL;
	}
	rdev->debugfs[rdev->debugfs_count].files = files;
	rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
	rdev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
	drm_debugfs_create_files(files, nfiles,
				 rdev->ddev->control->debugfs_root,
				 rdev->ddev->control);
	drm_debugfs_create_files(files, nfiles,
				 rdev->ddev->primary->debugfs_root,
				 rdev->ddev->primary);
#endif
	return 0;
}
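
/*
 * Registration sketch (illustrative only, the foo_* names are hypothetical):
 * a component exposes debugfs files by handing this function a static
 * drm_info_list array, following the pattern used elsewhere in the driver:
 *
 *	static int foo_debugfs_show(struct seq_file *m, void *data)
 *	{
 *		struct drm_info_node *node = (struct drm_info_node *)m->private;
 *		struct radeon_device *rdev = node->minor->dev->dev_private;
 *
 *		seq_printf(m, "accel_working: %d\n", rdev->accel_working);
 *		return 0;
 *	}
 *
 *	static struct drm_info_list foo_debugfs_list[] = {
 *		{ "radeon_foo_info", foo_debugfs_show, 0, NULL },
 *	};
 *
 *	radeon_debugfs_add_files(rdev, foo_debugfs_list,
 *				 ARRAY_SIZE(foo_debugfs_list));
 */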

static void radeon_debugfs_remove_files(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < rdev->debugfs_count; i++) {
		drm_debugfs_remove_files(rdev->debugfs[i].files,
					 rdev->debugfs[i].num_files,
					 rdev->ddev->control);
		drm_debugfs_remove_files(rdev->debugfs[i].files,
					 rdev->debugfs[i].num_files,
					 rdev->ddev->primary);
	}
#endif
}

#if defined(CONFIG_DEBUG_FS)
int radeon_debugfs_init(struct drm_minor *minor)
{
	return 0;
}

void radeon_debugfs_cleanup(struct drm_minor *minor)
{
}
#endif