drm/radeon: allocate PPLLs from low to high
drivers/gpu/drm/radeon/radeon_device.c
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include <linux/console.h>
 29#include <linux/slab.h>
30#include <drm/drmP.h>
31#include <drm/drm_crtc_helper.h>
32#include <drm/radeon_drm.h>
 33#include <linux/vgaarb.h>
 34#include <linux/vga_switcheroo.h>
 35#include <linux/efi.h>
36#include "radeon_reg.h"
37#include "radeon.h"
38#include "atom.h"
39
40static const char radeon_family_name[][16] = {
41 "R100",
42 "RV100",
43 "RS100",
44 "RV200",
45 "RS200",
46 "R200",
47 "RV250",
48 "RS300",
49 "RV280",
50 "R300",
51 "R350",
52 "RV350",
53 "RV380",
54 "R420",
55 "R423",
56 "RV410",
57 "RS400",
58 "RS480",
59 "RS600",
60 "RS690",
61 "RS740",
62 "RV515",
63 "R520",
64 "RV530",
65 "RV560",
66 "RV570",
67 "R580",
68 "R600",
69 "RV610",
70 "RV630",
71 "RV670",
72 "RV620",
73 "RV635",
74 "RS780",
75 "RS880",
76 "RV770",
77 "RV730",
78 "RV710",
79 "RV740",
80 "CEDAR",
81 "REDWOOD",
82 "JUNIPER",
83 "CYPRESS",
84 "HEMLOCK",
 85 "PALM",
 86 "SUMO",
 87 "SUMO2",
 88 "BARTS",
 89 "TURKS",
 90 "CAICOS",
 91 "CAYMAN",
 92 "ARUBA",
 93 "TAHITI",
 94 "PITCAIRN",
 95 "VERDE",
96 "LAST",
97};
98
99/**
100 * radeon_surface_init - Clear GPU surface registers.
101 *
102 * @rdev: radeon_device pointer
103 *
104 * Clear GPU surface registers (r1xx-r5xx).
 105 */
 106void radeon_surface_init(struct radeon_device *rdev)
107{
108 /* FIXME: check this out */
109 if (rdev->family < CHIP_R600) {
110 int i;
111
112 for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
113 if (rdev->surface_regs[i].bo)
114 radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
115 else
116 radeon_clear_surface_reg(rdev, i);
 117 }
118 /* enable surfaces */
119 WREG32(RADEON_SURFACE_CNTL, 0);
120 }
121}
122
123/*
 124 * GPU scratch register helper functions.
125 */
126/**
127 * radeon_scratch_init - Init scratch register driver information.
128 *
129 * @rdev: radeon_device pointer
130 *
131 * Init CP scratch register driver information (r1xx-r5xx)
132 */
 133void radeon_scratch_init(struct radeon_device *rdev)
134{
135 int i;
136
137 /* FIXME: check this out */
138 if (rdev->family < CHIP_R300) {
139 rdev->scratch.num_reg = 5;
140 } else {
141 rdev->scratch.num_reg = 7;
142 }
 143 rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
144 for (i = 0; i < rdev->scratch.num_reg; i++) {
145 rdev->scratch.free[i] = true;
 146 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
147 }
148}
149
150/**
151 * radeon_scratch_get - Allocate a scratch register
152 *
153 * @rdev: radeon_device pointer
154 * @reg: scratch register mmio offset
155 *
156 * Allocate a CP scratch register for use by the driver (all asics).
157 * Returns 0 on success or -EINVAL on failure.
158 */
159int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
160{
161 int i;
162
163 for (i = 0; i < rdev->scratch.num_reg; i++) {
164 if (rdev->scratch.free[i]) {
165 rdev->scratch.free[i] = false;
166 *reg = rdev->scratch.reg[i];
167 return 0;
168 }
169 }
170 return -EINVAL;
171}
172
173/**
174 * radeon_scratch_free - Free a scratch register
175 *
176 * @rdev: radeon_device pointer
177 * @reg: scratch register mmio offset
178 *
179 * Free a CP scratch register allocated for use by the driver (all asics)
180 */
181void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
182{
183 int i;
184
185 for (i = 0; i < rdev->scratch.num_reg; i++) {
186 if (rdev->scratch.reg[i] == reg) {
187 rdev->scratch.free[i] = true;
188 return;
189 }
190 }
191}
192
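/*
 * Editor's illustrative sketch (not part of radeon_device.c): the intended
 * calling pattern for the scratch helpers above, mirroring how the ring
 * tests use a scratch register as a GPU-writable mailbox. The helper name
 * and the 0xCAFEDEAD marker value are just examples.
 */
static int example_scratch_roundtrip(struct radeon_device *rdev)
{
	uint32_t scratch;
	int r;

	r = radeon_scratch_get(rdev, &scratch);	/* reserve a free register */
	if (r)
		return r;
	WREG32(scratch, 0xCAFEDEAD);		/* CPU writes a marker value */
	/* ... emit a GPU packet that writes the register, then poll it ... */
	radeon_scratch_free(rdev, scratch);	/* return it to the pool */
	return 0;
}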
193/*
194 * radeon_wb_*()
 195 * Writeback is the method by which the GPU updates special pages
196 * in memory with the status of certain GPU events (fences, ring pointers,
197 * etc.).
198 */
199
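/*
 * Editor's illustrative sketch (not part of radeon_device.c): what writeback
 * buys the driver. When rdev->wb.enabled is set, status values such as a
 * ring's read pointer can be fetched from the writeback page in system
 * memory instead of doing an MMIO read; rptr_offs/rptr_reg are the fields
 * the radeon ring code uses, the helper name is made up.
 */
static u32 example_ring_get_rptr(struct radeon_device *rdev,
				 struct radeon_ring *ring)
{
	if (rdev->wb.enabled)
		return le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
	return RREG32(ring->rptr_reg);
}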
200/**
201 * radeon_wb_disable - Disable Writeback
202 *
203 * @rdev: radeon_device pointer
204 *
205 * Disables Writeback (all asics). Used for suspend.
206 */
207void radeon_wb_disable(struct radeon_device *rdev)
208{
209 int r;
210
211 if (rdev->wb.wb_obj) {
212 r = radeon_bo_reserve(rdev->wb.wb_obj, false);
213 if (unlikely(r != 0))
214 return;
215 radeon_bo_kunmap(rdev->wb.wb_obj);
216 radeon_bo_unpin(rdev->wb.wb_obj);
217 radeon_bo_unreserve(rdev->wb.wb_obj);
218 }
219 rdev->wb.enabled = false;
220}
221
222/**
223 * radeon_wb_fini - Disable Writeback and free memory
224 *
225 * @rdev: radeon_device pointer
226 *
227 * Disables Writeback and frees the Writeback memory (all asics).
228 * Used at driver shutdown.
229 */
230void radeon_wb_fini(struct radeon_device *rdev)
231{
232 radeon_wb_disable(rdev);
233 if (rdev->wb.wb_obj) {
234 radeon_bo_unref(&rdev->wb.wb_obj);
235 rdev->wb.wb = NULL;
236 rdev->wb.wb_obj = NULL;
237 }
238}
239
240/**
 241 * radeon_wb_init - Init Writeback driver info and allocate memory
 242 *
 243 * @rdev: radeon_device pointer
 244 *
 245 * Initializes writeback and allocates the writeback memory (all asics).
 246 * Used at driver startup.
 247 * Returns 0 on success or an error code on failure.
248 */
249int radeon_wb_init(struct radeon_device *rdev)
250{
251 int r;
252
253 if (rdev->wb.wb_obj == NULL) {
 254 r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
 255 RADEON_GEM_DOMAIN_GTT, NULL, &rdev->wb.wb_obj);
256 if (r) {
257 dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
258 return r;
259 }
260 }
261 r = radeon_bo_reserve(rdev->wb.wb_obj, false);
262 if (unlikely(r != 0)) {
263 radeon_wb_fini(rdev);
264 return r;
265 }
266 r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
267 &rdev->wb.gpu_addr);
268 if (r) {
269 radeon_bo_unreserve(rdev->wb.wb_obj);
270 dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
271 radeon_wb_fini(rdev);
272 return r;
273 }
274 r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
275 radeon_bo_unreserve(rdev->wb.wb_obj);
276 if (r) {
277 dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
278 radeon_wb_fini(rdev);
279 return r;
280 }
281
282 /* clear wb memory */
283 memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
284 /* disable event_write fences */
285 rdev->wb.use_event = false;
 286 /* disabled via module param */
 287 if (radeon_no_wb == 1) {
 288 rdev->wb.enabled = false;
 289 } else {
 290 if (rdev->flags & RADEON_IS_AGP) {
 291 /* often unreliable on AGP */
 292 rdev->wb.enabled = false;
 293 } else if (rdev->family < CHIP_R300) {
 294 /* often unreliable on pre-r300 */
 295 rdev->wb.enabled = false;
 296 } else {
 297 rdev->wb.enabled = true;
 298 /* event_write fences are only available on r600+ */
 299 if (rdev->family >= CHIP_R600) {
 300 rdev->wb.use_event = true;
 301 }
 302 }
 303 }
 304 /* always use writeback/events on NI, APUs */
 305 if (rdev->family >= CHIP_PALM) {
 306 rdev->wb.enabled = true;
 307 rdev->wb.use_event = true;
 308 }
309
310 dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");
311
312 return 0;
313}
314
 315/**
 316 * radeon_vram_location - try to find VRAM location
 317 * @rdev: radeon device structure holding all necessary information
 318 * @mc: memory controller structure holding memory information
 319 * @base: base address at which to put VRAM
 320 *
 321 * Function will try to place VRAM at the base address provided
 322 * as parameter (which is so far either the PCI aperture address or,
 323 * for IGP, the TOM base address).
 324 *
 325 * If there is not enough space to fit the invisible VRAM in the 32 bit
 326 * address space then we limit the VRAM size to the aperture.
 327 *
 328 * If we are using AGP and if the AGP aperture doesn't allow us to have
 329 * room for all the VRAM then we restrict the VRAM to the PCI aperture
 330 * size and print a warning.
 331 *
 332 * This function will never fail, the worst case is limiting VRAM.
 333 *
 334 * Note: GTT start, end, size should be initialized before calling this
 335 * function on AGP platform.
 336 *
 337 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 338 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 339 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 340 * not IGP.
 341 *
 342 * Note: we use mc_vram_size as on some boards we need to program the mc to
 343 * cover the whole aperture even if VRAM size is smaller than the aperture
 344 * size (Novell bug 204882 along with lots of Ubuntu ones).
 345 *
 346 * Note: when limiting vram it's safe to overwrite real_vram_size because
 347 * we are not in the case where real_vram_size is smaller than mc_vram_size
 348 * (i.e. not affected by the bogus hw of Novell bug 204882 and the Ubuntu
 349 * ones).
 350 *
 351 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 352 * explicitly check for that though.
 353 *
 354 * FIXME: when reducing VRAM size align new size on power of 2.
 355 */
 356void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
 357{
358 mc->vram_start = base;
359 if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) {
360 dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
361 mc->real_vram_size = mc->aper_size;
362 mc->mc_vram_size = mc->aper_size;
363 }
364 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
 365 if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
366 dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
367 mc->real_vram_size = mc->aper_size;
368 mc->mc_vram_size = mc->aper_size;
369 }
370 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
371 if (radeon_vram_limit && radeon_vram_limit < mc->real_vram_size)
372 mc->real_vram_size = radeon_vram_limit;
 373 dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
374 mc->mc_vram_size >> 20, mc->vram_start,
375 mc->vram_end, mc->real_vram_size >> 20);
376}
 377
 378/**
 379 * radeon_gtt_location - try to find GTT location
 380 * @rdev: radeon device structure holding all necessary information
 381 * @mc: memory controller structure holding memory information
 382 *
 383 * Function will try to place GTT before or after VRAM.
 384 *
 385 * If GTT size is bigger than the space left then we adjust the GTT size.
 386 * Thus this function will never fail.
 387 *
 388 * FIXME: when reducing GTT size align new size on power of 2.
 389 */
390void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
391{
392 u64 size_af, size_bf;
393
394 size_af = ((0xFFFFFFFF - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
395 size_bf = mc->vram_start & ~mc->gtt_base_align;
396 if (size_bf > size_af) {
397 if (mc->gtt_size > size_bf) {
398 dev_warn(rdev->dev, "limiting GTT\n");
399 mc->gtt_size = size_bf;
771fe6b9 400 }
 401 mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
 402 } else {
403 if (mc->gtt_size > size_af) {
404 dev_warn(rdev->dev, "limiting GTT\n");
405 mc->gtt_size = size_af;
406 }
 407 mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
 408 }
 409 mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
 410 dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
 411 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
412}
413
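/*
 * Editor's illustrative sketch (not part of radeon_device.c): how an asic's
 * mc_init code typically uses the two placement helpers above -- VRAM is
 * placed first (at the PCI aperture base, or the TOM base on IGPs), then
 * GTT is fitted before or after it. The field values are examples only.
 */
static void example_mc_setup(struct radeon_device *rdev)
{
	u64 base = pci_resource_start(rdev->pdev, 0);	/* VRAM BAR */

	rdev->mc.gtt_base_align = 0;
	radeon_vram_location(rdev, &rdev->mc, base);
	radeon_gtt_location(rdev, &rdev->mc);
}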
414/*
 415 * GPU helper functions.
416 */
417/**
418 * radeon_card_posted - check if the hw has already been initialized
419 *
420 * @rdev: radeon_device pointer
421 *
422 * Check if the asic has been initialized (all asics).
423 * Used at driver startup.
424 * Returns true if initialized or false if not.
425 */
 426bool radeon_card_posted(struct radeon_device *rdev)
427{
428 uint32_t reg;
429
430 if (efi_enabled && rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE)
431 return false;
432
771fe6b9 433 /* first check CRTCs */
434 if (ASIC_IS_DCE41(rdev)) {
435 reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
436 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
437 if (reg & EVERGREEN_CRTC_MASTER_EN)
438 return true;
439 } else if (ASIC_IS_DCE4(rdev)) {
440 reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
441 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
442 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
443 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
444 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
445 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
446 if (reg & EVERGREEN_CRTC_MASTER_EN)
447 return true;
448 } else if (ASIC_IS_AVIVO(rdev)) {
449 reg = RREG32(AVIVO_D1CRTC_CONTROL) |
450 RREG32(AVIVO_D2CRTC_CONTROL);
451 if (reg & AVIVO_CRTC_EN) {
452 return true;
453 }
454 } else {
455 reg = RREG32(RADEON_CRTC_GEN_CNTL) |
456 RREG32(RADEON_CRTC2_GEN_CNTL);
457 if (reg & RADEON_CRTC_EN) {
458 return true;
459 }
460 }
461
462 /* then check MEM_SIZE, in case the crtcs are off */
463 if (rdev->family >= CHIP_R600)
464 reg = RREG32(R600_CONFIG_MEMSIZE);
465 else
466 reg = RREG32(RADEON_CONFIG_MEMSIZE);
467
468 if (reg)
469 return true;
470
471 return false;
472
473}
474
475/**
476 * radeon_update_bandwidth_info - update display bandwidth params
477 *
478 * @rdev: radeon_device pointer
479 *
480 * Used when sclk/mclk are switched or display modes are set.
481 * params are used to calculate display watermarks (all asics)
482 */
483void radeon_update_bandwidth_info(struct radeon_device *rdev)
484{
485 fixed20_12 a;
486 u32 sclk = rdev->pm.current_sclk;
487 u32 mclk = rdev->pm.current_mclk;
 488
489 /* sclk/mclk in Mhz */
490 a.full = dfixed_const(100);
491 rdev->pm.sclk.full = dfixed_const(sclk);
492 rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
493 rdev->pm.mclk.full = dfixed_const(mclk);
494 rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
 495
 496 if (rdev->flags & RADEON_IS_IGP) {
 497 a.full = dfixed_const(16);
 498 /* core_bandwidth = sclk(Mhz) * 16 */
 499 rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
500 }
501}
502
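/*
 * Editor's illustrative sketch (not part of radeon_device.c): the fixed20_12
 * helpers used above carry 12 fractional bits, and the power-management
 * clocks are stored in 10 kHz units, so dividing by a fixed-point 100 yields
 * MHz. The helper name is made up.
 */
static u32 example_clk_to_mhz(u32 clk_10khz)
{
	fixed20_12 a, clk;

	a.full = dfixed_const(100);
	clk.full = dfixed_const(clk_10khz);
	clk.full = dfixed_div(clk, a);
	return dfixed_trunc(clk);	/* whole MHz */
}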
503/**
504 * radeon_boot_test_post_card - check and possibly initialize the hw
505 *
506 * @rdev: radeon_device pointer
507 *
508 * Check if the asic is initialized and if not, attempt to initialize
509 * it (all asics).
510 * Returns true if initialized or false if not.
511 */
512bool radeon_boot_test_post_card(struct radeon_device *rdev)
513{
514 if (radeon_card_posted(rdev))
515 return true;
516
517 if (rdev->bios) {
518 DRM_INFO("GPU not posted. posting now...\n");
519 if (rdev->is_atom_bios)
520 atom_asic_init(rdev->mode_info.atom_context);
521 else
522 radeon_combios_asic_init(rdev->ddev);
523 return true;
524 } else {
525 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
526 return false;
527 }
528}
529
530/**
531 * radeon_dummy_page_init - init dummy page used by the driver
532 *
533 * @rdev: radeon_device pointer
534 *
535 * Allocate the dummy page used by the driver (all asics).
536 * This dummy page is used by the driver as a filler for gart entries
537 * when pages are taken out of the GART
 538 * Returns 0 on success, -ENOMEM on failure.
539 */
540int radeon_dummy_page_init(struct radeon_device *rdev)
541{
542 if (rdev->dummy_page.page)
543 return 0;
544 rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
545 if (rdev->dummy_page.page == NULL)
546 return -ENOMEM;
547 rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
548 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
549 if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
550 dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
551 __free_page(rdev->dummy_page.page);
552 rdev->dummy_page.page = NULL;
553 return -ENOMEM;
554 }
555 return 0;
556}
557
558/**
559 * radeon_dummy_page_fini - free dummy page used by the driver
560 *
561 * @rdev: radeon_device pointer
562 *
563 * Frees the dummy page used by the driver (all asics).
564 */
565void radeon_dummy_page_fini(struct radeon_device *rdev)
566{
567 if (rdev->dummy_page.page == NULL)
568 return;
569 pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
570 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
571 __free_page(rdev->dummy_page.page);
572 rdev->dummy_page.page = NULL;
573}
574
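/*
 * Editor's illustrative sketch (not part of radeon_device.c): how the dummy
 * page is consumed. When a GART slot is unbound, the GART code points the
 * page table entry at the dummy page's DMA address instead of leaving a
 * stale address behind (simplified from radeon_gart_unbind(); p is the CPU
 * page index, t the GPU page-table index, the helper name is made up).
 */
static void example_gart_unbind_one(struct radeon_device *rdev,
				    unsigned p, unsigned t)
{
	rdev->gart.pages[p] = NULL;
	rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
	if (rdev->gart.ptr)
		radeon_gart_set_page(rdev, t, rdev->dummy_page.addr);
}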
771fe6b9 575
771fe6b9 576/* ATOM accessor methods */
577/*
578 * ATOM is an interpreted byte code stored in tables in the vbios. The
579 * driver registers callbacks to access registers and the interpreter
 580 * in the driver parses the tables and executes them to program specific
581 * actions (set display modes, asic init, etc.). See radeon_atombios.c,
582 * atombios.h, and atom.c
583 */
584
585/**
586 * cail_pll_read - read PLL register
587 *
588 * @info: atom card_info pointer
589 * @reg: PLL register offset
590 *
591 * Provides a PLL register accessor for the atom interpreter (r4xx+).
592 * Returns the value of the PLL register.
593 */
594static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
595{
596 struct radeon_device *rdev = info->dev->dev_private;
597 uint32_t r;
598
599 r = rdev->pll_rreg(rdev, reg);
600 return r;
601}
602
603/**
604 * cail_pll_write - write PLL register
605 *
606 * @info: atom card_info pointer
607 * @reg: PLL register offset
608 * @val: value to write to the pll register
609 *
610 * Provides a PLL register accessor for the atom interpreter (r4xx+).
611 */
612static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
613{
614 struct radeon_device *rdev = info->dev->dev_private;
615
616 rdev->pll_wreg(rdev, reg, val);
617}
618
619/**
620 * cail_mc_read - read MC (Memory Controller) register
621 *
622 * @info: atom card_info pointer
623 * @reg: MC register offset
624 *
625 * Provides an MC register accessor for the atom interpreter (r4xx+).
626 * Returns the value of the MC register.
627 */
628static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
629{
630 struct radeon_device *rdev = info->dev->dev_private;
631 uint32_t r;
632
633 r = rdev->mc_rreg(rdev, reg);
634 return r;
635}
636
637/**
638 * cail_mc_write - write MC (Memory Controller) register
639 *
640 * @info: atom card_info pointer
641 * @reg: MC register offset
 642 * @val: value to write to the MC register
 643 *
 644 * Provides an MC register accessor for the atom interpreter (r4xx+).
645 */
646static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
647{
648 struct radeon_device *rdev = info->dev->dev_private;
649
650 rdev->mc_wreg(rdev, reg, val);
651}
652
653/**
654 * cail_reg_write - write MMIO register
655 *
656 * @info: atom card_info pointer
657 * @reg: MMIO register offset
 658 * @val: value to write to the MMIO register
 659 *
 660 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
661 */
662static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
663{
664 struct radeon_device *rdev = info->dev->dev_private;
665
666 WREG32(reg*4, val);
667}
668
669/**
670 * cail_reg_read - read MMIO register
671 *
672 * @info: atom card_info pointer
673 * @reg: MMIO register offset
674 *
675 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
676 * Returns the value of the MMIO register.
677 */
678static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
679{
680 struct radeon_device *rdev = info->dev->dev_private;
681 uint32_t r;
682
683 r = RREG32(reg*4);
684 return r;
685}
686
687/**
688 * cail_ioreg_write - write IO register
689 *
690 * @info: atom card_info pointer
691 * @reg: IO register offset
 692 * @val: value to write to the IO register
 693 *
 694 * Provides an IO register accessor for the atom interpreter (r4xx+).
695 */
696static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
697{
698 struct radeon_device *rdev = info->dev->dev_private;
699
700 WREG32_IO(reg*4, val);
701}
702
703/**
704 * cail_ioreg_read - read IO register
705 *
706 * @info: atom card_info pointer
707 * @reg: IO register offset
708 *
709 * Provides an IO register accessor for the atom interpreter (r4xx+).
710 * Returns the value of the IO register.
711 */
712static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
713{
714 struct radeon_device *rdev = info->dev->dev_private;
715 uint32_t r;
716
717 r = RREG32_IO(reg*4);
718 return r;
719}
720
721/**
722 * radeon_atombios_init - init the driver info and callbacks for atombios
723 *
724 * @rdev: radeon_device pointer
725 *
726 * Initializes the driver info and register access callbacks for the
727 * ATOM interpreter (r4xx+).
 728 * Returns 0 on success, -ENOMEM on failure.
729 * Called at driver startup.
730 */
731int radeon_atombios_init(struct radeon_device *rdev)
732{
733 struct card_info *atom_card_info =
734 kzalloc(sizeof(struct card_info), GFP_KERNEL);
735
736 if (!atom_card_info)
737 return -ENOMEM;
738
739 rdev->mode_info.atom_card_info = atom_card_info;
740 atom_card_info->dev = rdev->ddev;
741 atom_card_info->reg_read = cail_reg_read;
742 atom_card_info->reg_write = cail_reg_write;
743 /* needed for iio ops */
744 if (rdev->rio_mem) {
745 atom_card_info->ioreg_read = cail_ioreg_read;
746 atom_card_info->ioreg_write = cail_ioreg_write;
747 } else {
748 DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
749 atom_card_info->ioreg_read = cail_reg_read;
750 atom_card_info->ioreg_write = cail_reg_write;
751 }
752 atom_card_info->mc_read = cail_mc_read;
753 atom_card_info->mc_write = cail_mc_write;
754 atom_card_info->pll_read = cail_pll_read;
755 atom_card_info->pll_write = cail_pll_write;
756
757 rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
 758 mutex_init(&rdev->mode_info.atom_context->mutex);
 759 radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
 760 atom_allocate_fb_scratch(rdev->mode_info.atom_context);
761 return 0;
762}
763
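/*
 * Editor's illustrative sketch (not part of radeon_device.c): once the
 * context above is parsed, command tables are run with atom_execute_table(),
 * passing a master-table index and a packed parameter block; this is how
 * atom_asic_init() and the mode-setting paths drive the hardware. The helper
 * name is made up and the argument packing follows atombios_crtc.c.
 */
static void example_run_adjust_pll(struct radeon_device *rdev, u32 clock_10khz)
{
	int index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll);
	ADJUST_DISPLAY_PLL_PS_ALLOCATION args;

	memset(&args, 0, sizeof(args));
	args.usPixelClock = cpu_to_le16((u16)clock_10khz);
	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}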
764/**
765 * radeon_atombios_fini - free the driver info and callbacks for atombios
766 *
767 * @rdev: radeon_device pointer
768 *
769 * Frees the driver info and register access callbacks for the ATOM
770 * interpreter (r4xx+).
771 * Called at driver shutdown.
772 */
773void radeon_atombios_fini(struct radeon_device *rdev)
774{
4a04a844
JG
775 if (rdev->mode_info.atom_context) {
776 kfree(rdev->mode_info.atom_context->scratch);
777 kfree(rdev->mode_info.atom_context);
778 }
 779 kfree(rdev->mode_info.atom_card_info);
780}
781
782/* COMBIOS */
783/*
784 * COMBIOS is the bios format prior to ATOM. It provides
785 * command tables similar to ATOM, but doesn't have a unified
786 * parser. See radeon_combios.c
787 */
788
789/**
790 * radeon_combios_init - init the driver info for combios
791 *
792 * @rdev: radeon_device pointer
793 *
794 * Initializes the driver info for combios (r1xx-r3xx).
 795 * Returns 0 on success.
796 * Called at driver startup.
797 */
798int radeon_combios_init(struct radeon_device *rdev)
799{
800 radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
801 return 0;
802}
803
804/**
805 * radeon_combios_fini - free the driver info for combios
806 *
807 * @rdev: radeon_device pointer
808 *
809 * Frees the driver info for combios (r1xx-r3xx).
810 * Called at driver shutdown.
811 */
812void radeon_combios_fini(struct radeon_device *rdev)
813{
814}
815
816/* if we get transitioned to only one device, take VGA back */
817/**
818 * radeon_vga_set_decode - enable/disable vga decode
819 *
820 * @cookie: radeon_device pointer
821 * @state: enable/disable vga decode
822 *
823 * Enable/disable vga decode (all asics).
824 * Returns VGA resource flags.
825 */
826static unsigned int radeon_vga_set_decode(void *cookie, bool state)
827{
828 struct radeon_device *rdev = cookie;
829 radeon_vga_set_state(rdev, state);
830 if (state)
831 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
832 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
833 else
834 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
835}
 836
837/**
838 * radeon_check_arguments - validate module params
839 *
840 * @rdev: radeon_device pointer
841 *
842 * Validates certain module parameters and updates
843 * the associated values used by the driver (all asics).
844 */
 845static void radeon_check_arguments(struct radeon_device *rdev)
846{
847 /* vramlimit must be a power of two */
848 switch (radeon_vram_limit) {
849 case 0:
850 case 4:
851 case 8:
852 case 16:
853 case 32:
854 case 64:
855 case 128:
856 case 256:
857 case 512:
858 case 1024:
859 case 2048:
860 case 4096:
861 break;
862 default:
863 dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
864 radeon_vram_limit);
865 radeon_vram_limit = 0;
866 break;
867 }
868 radeon_vram_limit = radeon_vram_limit << 20;
869 /* gtt size must be power of two and greater or equal to 32M */
870 switch (radeon_gart_size) {
871 case 4:
872 case 8:
873 case 16:
874 dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
875 radeon_gart_size);
876 radeon_gart_size = 512;
877 break;
878 case 32:
879 case 64:
880 case 128:
881 case 256:
882 case 512:
883 case 1024:
884 case 2048:
885 case 4096:
886 break;
887 default:
888 dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
889 radeon_gart_size);
890 radeon_gart_size = 512;
891 break;
892 }
893 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
894 /* AGP mode can only be -1, 1, 2, 4, 8 */
895 switch (radeon_agpmode) {
896 case -1:
897 case 0:
898 case 1:
899 case 2:
900 case 4:
901 case 8:
902 break;
903 default:
904 dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
905 "-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
906 radeon_agpmode = 0;
907 break;
908 }
909}
910
911/**
912 * radeon_switcheroo_set_state - set switcheroo state
913 *
914 * @pdev: pci dev pointer
915 * @state: vga switcheroo state
916 *
 917 * Callback for the switcheroo driver. Suspends or resumes the
 918 * asic before or after it is powered up using ACPI methods.
919 */
920static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
921{
922 struct drm_device *dev = pci_get_drvdata(pdev);
923 pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
924 if (state == VGA_SWITCHEROO_ON) {
925 printk(KERN_INFO "radeon: switched on\n");
926 /* don't suspend or resume card normally */
 927 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 928 radeon_resume_kms(dev);
 929 dev->switch_power_state = DRM_SWITCH_POWER_ON;
 930 drm_kms_helper_poll_enable(dev);
931 } else {
932 printk(KERN_INFO "radeon: switched off\n");
 933 drm_kms_helper_poll_disable(dev);
 934 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 935 radeon_suspend_kms(dev, pmm);
 936 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
937 }
938}
939
940/**
941 * radeon_switcheroo_can_switch - see if switcheroo state can change
942 *
943 * @pdev: pci dev pointer
944 *
 945 * Callback for the switcheroo driver. Checks if the switcheroo
 946 * state can be changed.
947 * Returns true if the state can be changed, false if not.
948 */
949static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
950{
951 struct drm_device *dev = pci_get_drvdata(pdev);
952 bool can_switch;
953
954 spin_lock(&dev->count_lock);
955 can_switch = (dev->open_count == 0);
956 spin_unlock(&dev->count_lock);
957 return can_switch;
958}
959
960static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
961 .set_gpu_state = radeon_switcheroo_set_state,
962 .reprobe = NULL,
963 .can_switch = radeon_switcheroo_can_switch,
964};
 965
966/**
967 * radeon_device_init - initialize the driver
968 *
969 * @rdev: radeon_device pointer
 970 * @ddev: drm dev pointer
971 * @pdev: pci dev pointer
972 * @flags: driver flags
973 *
974 * Initializes the driver info and hw (all asics).
975 * Returns 0 for success or an error on failure.
976 * Called at driver startup.
977 */
978int radeon_device_init(struct radeon_device *rdev,
979 struct drm_device *ddev,
980 struct pci_dev *pdev,
981 uint32_t flags)
982{
 983 int r, i;
 984 int dma_bits;
 985
 986 rdev->shutdown = false;
 987 rdev->dev = &pdev->dev;
988 rdev->ddev = ddev;
989 rdev->pdev = pdev;
990 rdev->flags = flags;
991 rdev->family = flags & RADEON_FAMILY_MASK;
992 rdev->is_atom_bios = false;
993 rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
994 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
 995 rdev->accel_working = false;
996 /* set up ring ids */
997 for (i = 0; i < RADEON_NUM_RINGS; i++) {
998 rdev->ring[i].idx = i;
999 }
 1000
1001 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
1002 radeon_family_name[rdev->family], pdev->vendor, pdev->device,
1003 pdev->subsystem_vendor, pdev->subsystem_device);
 1004
 1005 /* mutex initialization is all done here so we
 1006 * can recall functions without locking issues */
 1007 mutex_init(&rdev->ring_lock);
 1008 mutex_init(&rdev->dc_hw_i2c_mutex);
 1009 atomic_set(&rdev->ih.lock, 0);
 1010 mutex_init(&rdev->gem.mutex);
 1011 mutex_init(&rdev->pm.mutex);
 1012 mutex_init(&rdev->gpu_clock_mutex);
 1013 init_rwsem(&rdev->pm.mclk_lock);
 1014 init_rwsem(&rdev->exclusive_lock);
 1015 init_waitqueue_head(&rdev->irq.vblank_queue);
1016 r = radeon_gem_init(rdev);
1017 if (r)
1018 return r;
 1019 /* initialize vm here */
 1020 mutex_init(&rdev->vm_manager.lock);
1021 rdev->vm_manager.max_pfn = 1 << 20;
1022 INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);
 1023
1024 /* Set asic functions */
1025 r = radeon_asic_init(rdev);
 1026 if (r)
 1027 return r;
 1028 radeon_check_arguments(rdev);
 1029
1030 /* all of the newer IGP chips have an internal gart
1031 * However some rs4xx report as AGP, so remove that here.
1032 */
1033 if ((rdev->family >= CHIP_RS400) &&
1034 (rdev->flags & RADEON_IS_IGP)) {
1035 rdev->flags &= ~RADEON_IS_AGP;
1036 }
1037
 1038 if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
 1039 radeon_agp_disable(rdev);
1040 }
1041
1042 /* set DMA mask + need_dma32 flags.
1043 * PCIE - can handle 40-bits.
 1044 * IGP - can handle 40-bits
 1045 * AGP - generally dma32 is safest
 1046 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
1047 */
1048 rdev->need_dma32 = false;
1049 if (rdev->flags & RADEON_IS_AGP)
1050 rdev->need_dma32 = true;
 1051 if ((rdev->flags & RADEON_IS_PCI) &&
 1052 (rdev->family <= CHIP_RS740))
1053 rdev->need_dma32 = true;
1054
1055 dma_bits = rdev->need_dma32 ? 32 : 40;
1056 r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
 1057 if (r) {
 1058 rdev->need_dma32 = true;
 1059 dma_bits = 32;
1060 printk(KERN_WARNING "radeon: No suitable DMA available.\n");
1061 }
1062 r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
1063 if (r) {
1064 pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
1065 printk(KERN_WARNING "radeon: No coherent DMA available.\n");
1066 }
1067
1068 /* Registers mapping */
1069 /* TODO: block userspace mapping of io register */
1070 rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
1071 rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
1072 rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
1073 if (rdev->rmmio == NULL) {
1074 return -ENOMEM;
1075 }
1076 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
1077 DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);
1078
1079 /* io port mapping */
1080 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1081 if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
1082 rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
1083 rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
1084 break;
1085 }
1086 }
1087 if (rdev->rio_mem == NULL)
1088 DRM_ERROR("Unable to find PCI I/O BAR\n");
1089
 1090 /* if we have > 1 VGA cards, then disable the radeon VGA resources */
1091 /* this will fail for cards that aren't VGA class devices, just
1092 * ignore it */
1093 vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
 1094 vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops);
 1095
 1096 r = radeon_init(rdev);
 1097 if (r)
 1098 return r;
 1099
1100 r = radeon_ib_ring_tests(rdev);
1101 if (r)
1102 DRM_ERROR("ib ring test failed (%d).\n", r);
1103
1104 if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
1105 /* Acceleration not working on AGP card try again
1106 * with fallback to PCI or PCIE GART
1107 */
 1108 radeon_asic_reset(rdev);
1109 radeon_fini(rdev);
1110 radeon_agp_disable(rdev);
1111 r = radeon_init(rdev);
1112 if (r)
1113 return r;
 1114 }
 1115 if ((radeon_testing & 1)) {
1116 radeon_test_moves(rdev);
1117 }
1118 if ((radeon_testing & 2)) {
1119 radeon_test_syncing(rdev);
1120 }
 1121 if (radeon_benchmarking) {
 1122 radeon_benchmark(rdev, radeon_benchmarking);
 1123 }
 1124 return 0;
1125}
1126
1127static void radeon_debugfs_remove_files(struct radeon_device *rdev);
1128
1129/**
1130 * radeon_device_fini - tear down the driver
1131 *
1132 * @rdev: radeon_device pointer
1133 *
1134 * Tear down the driver info (all asics).
1135 * Called at driver shutdown.
1136 */
1137void radeon_device_fini(struct radeon_device *rdev)
1138{
1139 DRM_INFO("radeon: finishing device.\n");
1140 rdev->shutdown = true;
1141 /* evict vram memory */
1142 radeon_bo_evict_vram(rdev);
 1143 radeon_fini(rdev);
 1144 vga_switcheroo_unregister_client(rdev->pdev);
 1145 vga_client_register(rdev->pdev, NULL, NULL, NULL);
1146 if (rdev->rio_mem)
1147 pci_iounmap(rdev->pdev, rdev->rio_mem);
 1148 rdev->rio_mem = NULL;
1149 iounmap(rdev->rmmio);
1150 rdev->rmmio = NULL;
 1151 radeon_debugfs_remove_files(rdev);
1152}
1153
1154
1155/*
1156 * Suspend & resume.
1157 */
1158/**
1159 * radeon_suspend_kms - initiate device suspend
1160 *
 1161 * @dev: drm dev pointer
1162 * @state: suspend state
1163 *
1164 * Puts the hw in the suspend state (all asics).
1165 * Returns 0 for success or an error on failure.
1166 * Called at driver suspend.
1167 */
1168int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
1169{
 1170 struct radeon_device *rdev;
 1171 struct drm_crtc *crtc;
 1172 struct drm_connector *connector;
 1173 int i, r;
 1174
 1175 if (dev == NULL || dev->dev_private == NULL) {
1176 return -ENODEV;
1177 }
1178 if (state.event == PM_EVENT_PRETHAW) {
1179 return 0;
1180 }
1181 rdev = dev->dev_private;
1182
 1183 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
 1184 return 0;
 1185
1186 drm_kms_helper_poll_disable(dev);
1187
1188 /* turn off display hw */
1189 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1190 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
1191 }
1192
771fe6b9
JG
1193 /* unpin the front buffers */
1194 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1195 struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
 1196 struct radeon_bo *robj;
1197
1198 if (rfb == NULL || rfb->obj == NULL) {
1199 continue;
1200 }
 1201 robj = gem_to_radeon_bo(rfb->obj);
1202 /* don't unpin kernel fb objects */
1203 if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
 1204 r = radeon_bo_reserve(robj, false);
 1205 if (r == 0) {
1206 radeon_bo_unpin(robj);
1207 radeon_bo_unreserve(robj);
1208 }
1209 }
1210 }
1211 /* evict vram memory */
 1212 radeon_bo_evict_vram(rdev);
1213
1214 mutex_lock(&rdev->ring_lock);
771fe6b9 1215 /* wait for gpu to finish processing current batch */
 1216 for (i = 0; i < RADEON_NUM_RINGS; i++)
1217 radeon_fence_wait_empty_locked(rdev, i);
1218 mutex_unlock(&rdev->ring_lock);
 1219
1220 radeon_save_bios_scratch_regs(rdev);
1221
 1222 radeon_pm_suspend(rdev);
 1223 radeon_suspend(rdev);
 1224 radeon_hpd_fini(rdev);
 1225 /* evict remaining vram memory */
 1226 radeon_bo_evict_vram(rdev);
 1227
1228 radeon_agp_suspend(rdev);
1229
771fe6b9
JG
1230 pci_save_state(dev->pdev);
1231 if (state.event == PM_EVENT_SUSPEND) {
1232 /* Shut down the device */
1233 pci_disable_device(dev->pdev);
1234 pci_set_power_state(dev->pdev, PCI_D3hot);
1235 }
 1236 console_lock();
 1237 radeon_fbdev_set_suspend(rdev, 1);
 1238 console_unlock();
1239 return 0;
1240}
1241
1242/**
1243 * radeon_resume_kms - initiate device resume
1244 *
 1245 * @dev: drm dev pointer
1246 *
1247 * Bring the hw back to operating state (all asics).
1248 * Returns 0 for success or an error on failure.
1249 * Called at driver resume.
1250 */
1251int radeon_resume_kms(struct drm_device *dev)
1252{
 1253 struct drm_connector *connector;
 1254 struct radeon_device *rdev = dev->dev_private;
 1255 int r;
 1256
 1257 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1258 return 0;
1259
 1260 console_lock();
1261 pci_set_power_state(dev->pdev, PCI_D0);
1262 pci_restore_state(dev->pdev);
1263 if (pci_enable_device(dev->pdev)) {
 1264 console_unlock();
1265 return -1;
1266 }
1267 /* resume AGP if in use */
1268 radeon_agp_resume(rdev);
 1269 radeon_resume(rdev);
1270
1271 r = radeon_ib_ring_tests(rdev);
1272 if (r)
1273 DRM_ERROR("ib ring test failed (%d).\n", r);
1274
 1275 radeon_pm_resume(rdev);
 1276 radeon_restore_bios_scratch_regs(rdev);
 1277
 1278 radeon_fbdev_set_suspend(rdev, 0);
 1279 console_unlock();
 1280
1281 /* init dig PHYs, disp eng pll */
1282 if (rdev->is_atom_bios) {
 1283 radeon_atom_encoder_init(rdev);
 1284 radeon_atom_disp_eng_pll_init(rdev);
1285 /* turn on the BL */
1286 if (rdev->mode_info.bl_encoder) {
1287 u8 bl_level = radeon_get_backlight_level(rdev,
1288 rdev->mode_info.bl_encoder);
1289 radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
1290 bl_level);
1291 }
 1292 }
1293 /* reset hpd state */
1294 radeon_hpd_init(rdev);
1295 /* blat the mode back in */
1296 drm_helper_resume_force_mode(dev);
1297 /* turn on display hw */
1298 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1299 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
1300 }
1301
1302 drm_kms_helper_poll_enable(dev);
1303 return 0;
1304}
1305
1306/**
1307 * radeon_gpu_reset - reset the asic
1308 *
1309 * @rdev: radeon device pointer
1310 *
 1311 * Attempts to reset the GPU if it has hung (all asics).
1312 * Returns 0 for success or an error on failure.
1313 */
1314int radeon_gpu_reset(struct radeon_device *rdev)
1315{
1316 unsigned ring_sizes[RADEON_NUM_RINGS];
1317 uint32_t *ring_data[RADEON_NUM_RINGS];
1318
1319 bool saved = false;
1320
1321 int i, r;
 1322 int resched;
 1323
 1324 down_write(&rdev->exclusive_lock);
 1325 radeon_save_bios_scratch_regs(rdev);
1326 /* block TTM */
1327 resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
1328 radeon_suspend(rdev);
1329
1330 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1331 ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
1332 &ring_data[i]);
1333 if (ring_sizes[i]) {
1334 saved = true;
1335 dev_info(rdev->dev, "Saved %d dwords of commands "
1336 "on ring %d.\n", ring_sizes[i], i);
1337 }
1338 }
1339
1340retry:
1341 r = radeon_asic_reset(rdev);
1342 if (!r) {
 1343 dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
 1344 radeon_resume(rdev);
 1345 }
 1346
1347 radeon_restore_bios_scratch_regs(rdev);
1348 drm_helper_resume_force_mode(rdev->ddev);
 1349
1350 if (!r) {
1351 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1352 radeon_ring_restore(rdev, &rdev->ring[i],
1353 ring_sizes[i], ring_data[i]);
1354 ring_sizes[i] = 0;
1355 ring_data[i] = NULL;
1356 }
1357
1358 r = radeon_ib_ring_tests(rdev);
1359 if (r) {
1360 dev_err(rdev->dev, "ib ring test failed (%d).\n", r);
1361 if (saved) {
 1362 saved = false;
1363 radeon_suspend(rdev);
1364 goto retry;
1365 }
1366 }
1367 } else {
1368 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1369 kfree(ring_data[i]);
1370 }
 1371 }
 1372
 1373 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
1374 if (r) {
1375 /* bad news, how to tell it to userspace ? */
1376 dev_info(rdev->dev, "GPU reset failed\n");
1377 }
1378
 1379 up_write(&rdev->exclusive_lock);
1380 return r;
1381}
1382
1383
1384/*
1385 * Debugfs
1386 */
1387int radeon_debugfs_add_files(struct radeon_device *rdev,
1388 struct drm_info_list *files,
1389 unsigned nfiles)
1390{
1391 unsigned i;
1392
1393 for (i = 0; i < rdev->debugfs_count; i++) {
1394 if (rdev->debugfs[i].files == files) {
1395 /* Already registered */
1396 return 0;
1397 }
1398 }
 1399
 1400 i = rdev->debugfs_count + 1;
1401 if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
1402 DRM_ERROR("Reached maximum number of debugfs components.\n");
1403 DRM_ERROR("Report so we increase "
1404 "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
1405 return -EINVAL;
1406 }
1407 rdev->debugfs[rdev->debugfs_count].files = files;
1408 rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
1409 rdev->debugfs_count = i;
1410#if defined(CONFIG_DEBUG_FS)
1411 drm_debugfs_create_files(files, nfiles,
1412 rdev->ddev->control->debugfs_root,
1413 rdev->ddev->control);
1414 drm_debugfs_create_files(files, nfiles,
1415 rdev->ddev->primary->debugfs_root,
1416 rdev->ddev->primary);
1417#endif
1418 return 0;
1419}
1420
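/*
 * Editor's illustrative sketch (not part of radeon_device.c): the
 * registration pattern used elsewhere in the driver. A static drm_info_list
 * array names a show() callback and is handed to radeon_debugfs_add_files()
 * once at init time. All names below are examples only.
 */
#if defined(CONFIG_DEBUG_FS)
static int example_debugfs_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct radeon_device *rdev = node->minor->dev->dev_private;

	seq_printf(m, "accel working: %d\n", rdev->accel_working);
	return 0;
}

static struct drm_info_list example_debugfs_list[] = {
	{"example_info", example_debugfs_info, 0, NULL},
};
#endif
/* at init: radeon_debugfs_add_files(rdev, example_debugfs_list, 1); */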
1421static void radeon_debugfs_remove_files(struct radeon_device *rdev)
1422{
1423#if defined(CONFIG_DEBUG_FS)
1424 unsigned i;
1425
1426 for (i = 0; i < rdev->debugfs_count; i++) {
1427 drm_debugfs_remove_files(rdev->debugfs[i].files,
1428 rdev->debugfs[i].num_files,
1429 rdev->ddev->control);
1430 drm_debugfs_remove_files(rdev->debugfs[i].files,
1431 rdev->debugfs[i].num_files,
1432 rdev->ddev->primary);
1433 }
1434#endif
1435}
1436
1437#if defined(CONFIG_DEBUG_FS)
1438int radeon_debugfs_init(struct drm_minor *minor)
1439{
1440 return 0;
1441}
1442
1443void radeon_debugfs_cleanup(struct drm_minor *minor)
1444{
1445}
1446#endif