drm/radeon: fix and simplify pot argument checks v3
[deliverable/linux.git] / drivers / gpu / drm / radeon / radeon_device.c
CommitLineData
771fe6b9
JG
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include <linux/console.h>
5a0e3ad6 29#include <linux/slab.h>
771fe6b9
JG
30#include <drm/drmP.h>
31#include <drm/drm_crtc_helper.h>
32#include <drm/radeon_drm.h>
28d52043 33#include <linux/vgaarb.h>
6a9ee8af 34#include <linux/vga_switcheroo.h>
bcc65fd8 35#include <linux/efi.h>
771fe6b9
JG
36#include "radeon_reg.h"
37#include "radeon.h"
771fe6b9
JG
38#include "atom.h"
39
1b5331d9
JG
/* Human-readable ASIC family names; presumably indexed by the radeon_family
 * enum — keep the order in sync with that enum (TODO confirm against
 * radeon_family.h). "LAST" is the terminator entry. */
static const char radeon_family_name[][16] = {
	"R100",
	"RV100",
	"RS100",
	"RV200",
	"RS200",
	"R200",
	"RV250",
	"RS300",
	"RV280",
	"R300",
	"R350",
	"RV350",
	"RV380",
	"R420",
	"R423",
	"RV410",
	"RS400",
	"RS480",
	"RS600",
	"RS690",
	"RS740",
	"RV515",
	"R520",
	"RV530",
	"RV560",
	"RV570",
	"R580",
	"R600",
	"RV610",
	"RV630",
	"RV670",
	"RV620",
	"RV635",
	"RS780",
	"RS880",
	"RV770",
	"RV730",
	"RV710",
	"RV740",
	"CEDAR",
	"REDWOOD",
	"JUNIPER",
	"CYPRESS",
	"HEMLOCK",
	"PALM",
	"SUMO",
	"SUMO2",
	"BARTS",
	"TURKS",
	"CAICOS",
	"CAYMAN",
	"ARUBA",
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"LAST",
};
98
0c195119
AD
/**
 * radeon_surface_init - Clear GPU surface registers.
 *
 * @rdev: radeon_device pointer
 *
 * Clear GPU surface registers (r1xx-r5xx).  Surface registers only
 * exist on pre-R600 parts, hence the family check.
 */
void radeon_surface_init(struct radeon_device *rdev)
{
	/* FIXME: check this out */
	if (rdev->family < CHIP_R600) {
		int i;

		for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
			/* re-apply the surface reg if a BO owns the slot,
			 * otherwise clear the stale hw state */
			if (rdev->surface_regs[i].bo)
				radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
			else
				radeon_clear_surface_reg(rdev, i);
		}
		/* enable surfaces */
		WREG32(RADEON_SURFACE_CNTL, 0);
	}
}
122
771fe6b9
JG
123/*
124 * GPU scratch registers helpers function.
125 */
0c195119
AD
126/**
127 * radeon_scratch_init - Init scratch register driver information.
128 *
129 * @rdev: radeon_device pointer
130 *
131 * Init CP scratch register driver information (r1xx-r5xx)
132 */
3ce0a23d 133void radeon_scratch_init(struct radeon_device *rdev)
771fe6b9
JG
134{
135 int i;
136
137 /* FIXME: check this out */
138 if (rdev->family < CHIP_R300) {
139 rdev->scratch.num_reg = 5;
140 } else {
141 rdev->scratch.num_reg = 7;
142 }
724c80e1 143 rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
771fe6b9
JG
144 for (i = 0; i < rdev->scratch.num_reg; i++) {
145 rdev->scratch.free[i] = true;
724c80e1 146 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
771fe6b9
JG
147 }
148}
149
0c195119
AD
150/**
151 * radeon_scratch_get - Allocate a scratch register
152 *
153 * @rdev: radeon_device pointer
154 * @reg: scratch register mmio offset
155 *
156 * Allocate a CP scratch register for use by the driver (all asics).
157 * Returns 0 on success or -EINVAL on failure.
158 */
771fe6b9
JG
159int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
160{
161 int i;
162
163 for (i = 0; i < rdev->scratch.num_reg; i++) {
164 if (rdev->scratch.free[i]) {
165 rdev->scratch.free[i] = false;
166 *reg = rdev->scratch.reg[i];
167 return 0;
168 }
169 }
170 return -EINVAL;
171}
172
0c195119
AD
173/**
174 * radeon_scratch_free - Free a scratch register
175 *
176 * @rdev: radeon_device pointer
177 * @reg: scratch register mmio offset
178 *
179 * Free a CP scratch register allocated for use by the driver (all asics)
180 */
771fe6b9
JG
181void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
182{
183 int i;
184
185 for (i = 0; i < rdev->scratch.num_reg; i++) {
186 if (rdev->scratch.reg[i] == reg) {
187 rdev->scratch.free[i] = true;
188 return;
189 }
190 }
191}
192
0c195119
AD
/*
 * radeon_wb_*()
 * Writeback is the method by which the GPU updates special pages
 * in memory with the status of certain GPU events (fences, ring pointers,
 * etc.).
 */

/**
 * radeon_wb_disable - Disable Writeback
 *
 * @rdev: radeon_device pointer
 *
 * Disables Writeback (all asics). Used for suspend.
 */
void radeon_wb_disable(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj) {
		/* unpin and unmap the writeback BO; the object itself is
		 * kept so a later radeon_wb_init() can reuse it */
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0))
			return;
		radeon_bo_kunmap(rdev->wb.wb_obj);
		radeon_bo_unpin(rdev->wb.wb_obj);
		radeon_bo_unreserve(rdev->wb.wb_obj);
	}
	rdev->wb.enabled = false;
}
221
0c195119
AD
/**
 * radeon_wb_fini - Disable Writeback and free memory
 *
 * @rdev: radeon_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
void radeon_wb_fini(struct radeon_device *rdev)
{
	radeon_wb_disable(rdev);
	if (rdev->wb.wb_obj) {
		radeon_bo_unref(&rdev->wb.wb_obj);
		rdev->wb.wb = NULL;
		rdev->wb.wb_obj = NULL;
	}
}
239
0c195119
AD
/**
 * radeon_wb_init- Init Writeback driver info and allocate memory
 *
 * @rdev: radeon_device pointer
 *
 * Allocates and maps the Writeback memory, then decides per asic
 * whether writeback and event fences are used (all asics).
 * Used at driver startup.
 * Returns 0 on success or an -error on failure.
 */
int radeon_wb_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj == NULL) {
		/* one GPU page in GTT holds all writeback slots */
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
				RADEON_GEM_DOMAIN_GTT, NULL, &rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}
	}
	r = radeon_bo_reserve(rdev->wb.wb_obj, false);
	if (unlikely(r != 0)) {
		radeon_wb_fini(rdev);
		return r;
	}
	r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
			&rdev->wb.gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->wb.wb_obj);
		dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
		radeon_wb_fini(rdev);
		return r;
	}
	r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
	radeon_bo_unreserve(rdev->wb.wb_obj);
	if (r) {
		dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
		radeon_wb_fini(rdev);
		return r;
	}

	/* clear wb memory */
	memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
	/* disable event_write fences */
	rdev->wb.use_event = false;
	/* disabled via module param */
	if (radeon_no_wb == 1) {
		rdev->wb.enabled = false;
	} else {
		if (rdev->flags & RADEON_IS_AGP) {
			/* often unreliable on AGP */
			rdev->wb.enabled = false;
		} else if (rdev->family < CHIP_R300) {
			/* often unreliable on pre-r300 */
			rdev->wb.enabled = false;
		} else {
			rdev->wb.enabled = true;
			/* event_write fences are only available on r600+ */
			if (rdev->family >= CHIP_R600) {
				rdev->wb.use_event = true;
			}
		}
	}
	/* always use writeback/events on NI, APUs */
	if (rdev->family >= CHIP_PALM) {
		rdev->wb.enabled = true;
		rdev->wb.use_event = true;
	}

	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");

	return 0;
}
314
d594e46a
JG
/**
 * radeon_vram_location - try to find VRAM location
 * @rdev: radeon device structure holding all necessary informations
 * @mc: memory controller structure holding memory informations
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at base address provided
 * as parameter (which is so far either PCI aperture address or
 * for IGP TOM base address).
 *
 * If there is not enough space to fit the invisible VRAM in the 32bits
 * address space then we limit the VRAM size to the aperture.
 *
 * If we are using AGP and if the AGP aperture doesn't allow us to have
 * room for all the VRAM then we restrict the VRAM to the PCI aperture
 * size and print a warning.
 *
 * This function never fails, worst case is limiting VRAM.
 *
 * Note: GTT start, end, size should be initialized before calling this
 * function on AGP platform.
 *
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size as on some board we need to program the mc to
 * cover the whole aperture even if VRAM size is inferior to aperture size
 * Novell bug 204882 + along with lots of ubuntu ones
 *
 * Note: when limiting vram it's safe to overwrite real_vram_size because
 * we are not in case where real_vram_size is inferior to mc_vram_size (ie
 * not affected by bogus hw of Novell bug 204882 + along with lots of ubuntu
 * ones)
 *
 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
	/* radeon_vram_limit module param is in MB; 0 means no limit */
	uint64_t limit = (uint64_t)radeon_vram_limit << 20;

	mc->vram_start = base;
	if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	/* on AGP, VRAM must not overlap the GTT range */
	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
			mc->mc_vram_size >> 20, mc->vram_start,
			mc->vram_end, mc->real_vram_size >> 20);
}
771fe6b9 379
d594e46a
JG
/**
 * radeon_gtt_location - try to find GTT location
 * @rdev: radeon device structure holding all necessary informations
 * @mc: memory controller structure holding memory informations
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If GTT size is bigger than space left then we adjust GTT size.
 * Thus function never fails.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_af, size_bf;

	/* aligned free space after VRAM end and before VRAM start */
	size_af = ((0xFFFFFFFF - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		/* place GTT below VRAM */
		if (mc->gtt_size > size_bf) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
	} else {
		/* place GTT above VRAM */
		if (mc->gtt_size > size_af) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}
415
771fe6b9
JG
416/*
417 * GPU helpers function.
418 */
0c195119
AD
/**
 * radeon_card_posted - check if the hw has already been initialized
 *
 * @rdev: radeon_device pointer
 *
 * Check if the asic has been initialized (all asics).
 * Used at driver startup.
 * Returns true if initialized or false if not.
 */
bool radeon_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* Apple hardware under EFI is always treated as unposted so the
	 * driver reposts it itself */
	if (efi_enabled && rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE)
		return false;

	/* first check CRTCs */
	if (ASIC_IS_DCE41(rdev)) {
		/* DCE4.1 (APUs): only two CRTCs */
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_DCE4(rdev)) {
		/* DCE4 (evergreen+): up to six CRTCs */
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_AVIVO(rdev)) {
		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
			RREG32(AVIVO_D2CRTC_CONTROL);
		if (reg & AVIVO_CRTC_EN) {
			return true;
		}
	} else {
		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
			RREG32(RADEON_CRTC2_GEN_CNTL);
		if (reg & RADEON_CRTC_EN) {
			return true;
		}
	}

	/* then check MEM_SIZE, in case the crtcs are off */
	if (rdev->family >= CHIP_R600)
		reg = RREG32(R600_CONFIG_MEMSIZE);
	else
		reg = RREG32(RADEON_CONFIG_MEMSIZE);

	if (reg)
		return true;

	return false;

}
476
0c195119
AD
/**
 * radeon_update_bandwidth_info - update display bandwidth params
 *
 * @rdev: radeon_device pointer
 *
 * Used when sclk/mclk are switched or display modes are set.
 * params are used to calculate display watermarks (all asics)
 */
void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
	fixed20_12 a;
	u32 sclk = rdev->pm.current_sclk;
	u32 mclk = rdev->pm.current_mclk;

	/* sclk/mclk in Mhz (inputs are in 10 kHz units, hence /100) */
	a.full = dfixed_const(100);
	rdev->pm.sclk.full = dfixed_const(sclk);
	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
	rdev->pm.mclk.full = dfixed_const(mclk);
	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);

	if (rdev->flags & RADEON_IS_IGP) {
		a.full = dfixed_const(16);
		/* core_bandwidth = sclk(Mhz) * 16 */
		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
	}
}
504
0c195119
AD
505/**
506 * radeon_boot_test_post_card - check and possibly initialize the hw
507 *
508 * @rdev: radeon_device pointer
509 *
510 * Check if the asic is initialized and if not, attempt to initialize
511 * it (all asics).
512 * Returns true if initialized or false if not.
513 */
72542d77
DA
514bool radeon_boot_test_post_card(struct radeon_device *rdev)
515{
516 if (radeon_card_posted(rdev))
517 return true;
518
519 if (rdev->bios) {
520 DRM_INFO("GPU not posted. posting now...\n");
521 if (rdev->is_atom_bios)
522 atom_asic_init(rdev->mode_info.atom_context);
523 else
524 radeon_combios_asic_init(rdev->ddev);
525 return true;
526 } else {
527 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
528 return false;
529 }
530}
531
0c195119
AD
/**
 * radeon_dummy_page_init - init dummy page used by the driver
 *
 * @rdev: radeon_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for gart entries
 * when pages are taken out of the GART.
 * Idempotent: returns 0 immediately if the page already exists.
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_dummy_page_init(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page)
		return 0;
	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (rdev->dummy_page.page == NULL)
		return -ENOMEM;
	rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
					0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
		dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(rdev->dummy_page.page);
		rdev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	return 0;
}
559
0c195119
AD
560/**
561 * radeon_dummy_page_fini - free dummy page used by the driver
562 *
563 * @rdev: radeon_device pointer
564 *
565 * Frees the dummy page used by the driver (all asics).
566 */
3ce0a23d
JG
567void radeon_dummy_page_fini(struct radeon_device *rdev)
568{
569 if (rdev->dummy_page.page == NULL)
570 return;
571 pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
572 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
573 __free_page(rdev->dummy_page.page);
574 rdev->dummy_page.page = NULL;
575}
576
771fe6b9 577
771fe6b9 578/* ATOM accessor methods */
0c195119
AD
579/*
580 * ATOM is an interpreted byte code stored in tables in the vbios. The
581 * driver registers callbacks to access registers and the interpreter
582 * in the driver parses the tables and executes then to program specific
583 * actions (set display modes, asic init, etc.). See radeon_atombios.c,
584 * atombios.h, and atom.c
585 */
586
587/**
588 * cail_pll_read - read PLL register
589 *
590 * @info: atom card_info pointer
591 * @reg: PLL register offset
592 *
593 * Provides a PLL register accessor for the atom interpreter (r4xx+).
594 * Returns the value of the PLL register.
595 */
771fe6b9
JG
596static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
597{
598 struct radeon_device *rdev = info->dev->dev_private;
599 uint32_t r;
600
601 r = rdev->pll_rreg(rdev, reg);
602 return r;
603}
604
0c195119
AD
605/**
606 * cail_pll_write - write PLL register
607 *
608 * @info: atom card_info pointer
609 * @reg: PLL register offset
610 * @val: value to write to the pll register
611 *
612 * Provides a PLL register accessor for the atom interpreter (r4xx+).
613 */
771fe6b9
JG
614static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
615{
616 struct radeon_device *rdev = info->dev->dev_private;
617
618 rdev->pll_wreg(rdev, reg, val);
619}
620
0c195119
AD
621/**
622 * cail_mc_read - read MC (Memory Controller) register
623 *
624 * @info: atom card_info pointer
625 * @reg: MC register offset
626 *
627 * Provides an MC register accessor for the atom interpreter (r4xx+).
628 * Returns the value of the MC register.
629 */
771fe6b9
JG
630static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
631{
632 struct radeon_device *rdev = info->dev->dev_private;
633 uint32_t r;
634
635 r = rdev->mc_rreg(rdev, reg);
636 return r;
637}
638
0c195119
AD
639/**
640 * cail_mc_write - write MC (Memory Controller) register
641 *
642 * @info: atom card_info pointer
643 * @reg: MC register offset
644 * @val: value to write to the pll register
645 *
646 * Provides a MC register accessor for the atom interpreter (r4xx+).
647 */
771fe6b9
JG
648static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
649{
650 struct radeon_device *rdev = info->dev->dev_private;
651
652 rdev->mc_wreg(rdev, reg, val);
653}
654
0c195119
AD
655/**
656 * cail_reg_write - write MMIO register
657 *
658 * @info: atom card_info pointer
659 * @reg: MMIO register offset
660 * @val: value to write to the pll register
661 *
662 * Provides a MMIO register accessor for the atom interpreter (r4xx+).
663 */
771fe6b9
JG
664static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
665{
666 struct radeon_device *rdev = info->dev->dev_private;
667
668 WREG32(reg*4, val);
669}
670
0c195119
AD
671/**
672 * cail_reg_read - read MMIO register
673 *
674 * @info: atom card_info pointer
675 * @reg: MMIO register offset
676 *
677 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
678 * Returns the value of the MMIO register.
679 */
771fe6b9
JG
680static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
681{
682 struct radeon_device *rdev = info->dev->dev_private;
683 uint32_t r;
684
685 r = RREG32(reg*4);
686 return r;
687}
688
0c195119
AD
689/**
690 * cail_ioreg_write - write IO register
691 *
692 * @info: atom card_info pointer
693 * @reg: IO register offset
694 * @val: value to write to the pll register
695 *
696 * Provides a IO register accessor for the atom interpreter (r4xx+).
697 */
351a52a2
AD
698static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
699{
700 struct radeon_device *rdev = info->dev->dev_private;
701
702 WREG32_IO(reg*4, val);
703}
704
0c195119
AD
705/**
706 * cail_ioreg_read - read IO register
707 *
708 * @info: atom card_info pointer
709 * @reg: IO register offset
710 *
711 * Provides an IO register accessor for the atom interpreter (r4xx+).
712 * Returns the value of the IO register.
713 */
351a52a2
AD
714static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
715{
716 struct radeon_device *rdev = info->dev->dev_private;
717 uint32_t r;
718
719 r = RREG32_IO(reg*4);
720 return r;
721}
722
0c195119
AD
723/**
724 * radeon_atombios_init - init the driver info and callbacks for atombios
725 *
726 * @rdev: radeon_device pointer
727 *
728 * Initializes the driver info and register access callbacks for the
729 * ATOM interpreter (r4xx+).
730 * Returns 0 on sucess, -ENOMEM on failure.
731 * Called at driver startup.
732 */
771fe6b9
JG
733int radeon_atombios_init(struct radeon_device *rdev)
734{
61c4b24b
MF
735 struct card_info *atom_card_info =
736 kzalloc(sizeof(struct card_info), GFP_KERNEL);
737
738 if (!atom_card_info)
739 return -ENOMEM;
740
741 rdev->mode_info.atom_card_info = atom_card_info;
742 atom_card_info->dev = rdev->ddev;
743 atom_card_info->reg_read = cail_reg_read;
744 atom_card_info->reg_write = cail_reg_write;
351a52a2
AD
745 /* needed for iio ops */
746 if (rdev->rio_mem) {
747 atom_card_info->ioreg_read = cail_ioreg_read;
748 atom_card_info->ioreg_write = cail_ioreg_write;
749 } else {
750 DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
751 atom_card_info->ioreg_read = cail_reg_read;
752 atom_card_info->ioreg_write = cail_reg_write;
753 }
61c4b24b
MF
754 atom_card_info->mc_read = cail_mc_read;
755 atom_card_info->mc_write = cail_mc_write;
756 atom_card_info->pll_read = cail_pll_read;
757 atom_card_info->pll_write = cail_pll_write;
758
759 rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
c31ad97f 760 mutex_init(&rdev->mode_info.atom_context->mutex);
771fe6b9 761 radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
d904ef9b 762 atom_allocate_fb_scratch(rdev->mode_info.atom_context);
771fe6b9
JG
763 return 0;
764}
765
0c195119
AD
766/**
767 * radeon_atombios_fini - free the driver info and callbacks for atombios
768 *
769 * @rdev: radeon_device pointer
770 *
771 * Frees the driver info and register access callbacks for the ATOM
772 * interpreter (r4xx+).
773 * Called at driver shutdown.
774 */
771fe6b9
JG
775void radeon_atombios_fini(struct radeon_device *rdev)
776{
4a04a844
JG
777 if (rdev->mode_info.atom_context) {
778 kfree(rdev->mode_info.atom_context->scratch);
779 kfree(rdev->mode_info.atom_context);
780 }
61c4b24b 781 kfree(rdev->mode_info.atom_card_info);
771fe6b9
JG
782}
783
0c195119
AD
/* COMBIOS */
/*
 * COMBIOS is the bios format prior to ATOM. It provides
 * command tables similar to ATOM, but doesn't have a unified
 * parser. See radeon_combios.c
 */

/**
 * radeon_combios_init - init the driver info for combios
 *
 * @rdev: radeon_device pointer
 *
 * Initializes the driver info for combios (r1xx-r3xx).
 * Returns 0 on success.
 * Called at driver startup.
 */
int radeon_combios_init(struct radeon_device *rdev)
{
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
	return 0;
}
805
0c195119
AD
/**
 * radeon_combios_fini - free the driver info for combios
 *
 * @rdev: radeon_device pointer
 *
 * Frees the driver info for combios (r1xx-r3xx).  Currently a no-op
 * since combios init allocates nothing; kept for symmetry with
 * radeon_atombios_fini().
 * Called at driver shutdown.
 */
void radeon_combios_fini(struct radeon_device *rdev)
{
}
817
0c195119
AD
818/* if we get transitioned to only one device, take VGA back */
819/**
820 * radeon_vga_set_decode - enable/disable vga decode
821 *
822 * @cookie: radeon_device pointer
823 * @state: enable/disable vga decode
824 *
825 * Enable/disable vga decode (all asics).
826 * Returns VGA resource flags.
827 */
28d52043
DA
828static unsigned int radeon_vga_set_decode(void *cookie, bool state)
829{
830 struct radeon_device *rdev = cookie;
28d52043
DA
831 radeon_vga_set_state(rdev, state);
832 if (state)
833 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
834 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
835 else
836 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
837}
c1176d6f 838
1bcb04f7
CK
/**
 * radeon_check_pot_argument - check that argument is a power of two
 *
 * @arg: value to check
 *
 * Validates that a certain argument is a power of two (all asics).
 * Note that 0 also reports true (a power of two has at most one bit
 * set, and 0 has none).
 * Returns true if argument is valid.
 */
static bool radeon_check_pot_argument(int arg)
{
	return !(arg & (arg - 1));
}
851
0c195119
AD
/**
 * radeon_check_arguments - validate module params
 *
 * @rdev: radeon_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 * Invalid values are reset to safe defaults with a warning.
 */
static void radeon_check_arguments(struct radeon_device *rdev)
{
	/* vramlimit must be a power of two (0 = no limit) */
	if (!radeon_check_pot_argument(radeon_vram_limit)) {
		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
				radeon_vram_limit);
		radeon_vram_limit = 0;
	}

	/* gtt size must be power of two and greater or equal to 32M */
	if (radeon_gart_size < 32) {
		dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
				radeon_gart_size);
		radeon_gart_size = 512;

	} else if (!radeon_check_pot_argument(radeon_gart_size)) {
		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
				radeon_gart_size);
		radeon_gart_size = 512;
	}
	/* param is in MB; mc.gtt_size is in bytes */
	rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;

	/* AGP mode can only be -1, 1, 2, 4, 8 */
	switch (radeon_agpmode) {
	case -1:
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
				"-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
		radeon_agpmode = 0;
		break;
	}
}
898
0c195119
AD
/**
 * radeon_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes
 * the asic before or after it is powered up using ACPI methods.
 */
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
	if (state == VGA_SWITCHEROO_ON) {
		printk(KERN_INFO "radeon: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		radeon_resume_kms(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		printk(KERN_INFO "radeon: switched off\n");
		/* stop output polling before powering the card down */
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		radeon_suspend_kms(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}
927
0c195119
AD
928/**
929 * radeon_switcheroo_can_switch - see if switcheroo state can change
930 *
931 * @pdev: pci dev pointer
932 *
933 * Callback for the switcheroo driver. Check of the switcheroo
934 * state can be changed.
935 * Returns true if the state can be changed, false if not.
936 */
6a9ee8af
DA
937static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
938{
939 struct drm_device *dev = pci_get_drvdata(pdev);
940 bool can_switch;
941
942 spin_lock(&dev->count_lock);
943 can_switch = (dev->open_count == 0);
944 spin_unlock(&dev->count_lock);
945 return can_switch;
946}
947
/* vga_switcheroo client callbacks; no reprobe hook is required */
static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
	.set_gpu_state = radeon_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = radeon_switcheroo_can_switch,
};
6a9ee8af 953
0c195119
AD
954/**
955 * radeon_device_init - initialize the driver
956 *
957 * @rdev: radeon_device pointer
958 * @pdev: drm dev pointer
959 * @pdev: pci dev pointer
960 * @flags: driver flags
961 *
962 * Initializes the driver info and hw (all asics).
963 * Returns 0 for success or an error on failure.
964 * Called at driver startup.
965 */
771fe6b9
JG
966int radeon_device_init(struct radeon_device *rdev,
967 struct drm_device *ddev,
968 struct pci_dev *pdev,
969 uint32_t flags)
970{
351a52a2 971 int r, i;
ad49f501 972 int dma_bits;
771fe6b9 973
771fe6b9 974 rdev->shutdown = false;
9f022ddf 975 rdev->dev = &pdev->dev;
771fe6b9
JG
976 rdev->ddev = ddev;
977 rdev->pdev = pdev;
978 rdev->flags = flags;
979 rdev->family = flags & RADEON_FAMILY_MASK;
980 rdev->is_atom_bios = false;
981 rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
982 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
733289c2 983 rdev->accel_working = false;
8b25ed34
AD
984 /* set up ring ids */
985 for (i = 0; i < RADEON_NUM_RINGS; i++) {
986 rdev->ring[i].idx = i;
987 }
1b5331d9 988
d522d9cc
TR
989 DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
990 radeon_family_name[rdev->family], pdev->vendor, pdev->device,
991 pdev->subsystem_vendor, pdev->subsystem_device);
1b5331d9 992
771fe6b9
JG
993 /* mutex initialization are all done here so we
994 * can recall function without having locking issues */
d6999bc7 995 mutex_init(&rdev->ring_lock);
40bacf16 996 mutex_init(&rdev->dc_hw_i2c_mutex);
c20dc369 997 atomic_set(&rdev->ih.lock, 0);
4c788679 998 mutex_init(&rdev->gem.mutex);
c913e23a 999 mutex_init(&rdev->pm.mutex);
6759a0a7 1000 mutex_init(&rdev->gpu_clock_mutex);
db7fce39 1001 init_rwsem(&rdev->pm.mclk_lock);
dee53e7f 1002 init_rwsem(&rdev->exclusive_lock);
73a6d3fc 1003 init_waitqueue_head(&rdev->irq.vblank_queue);
1b9c3dd0
AD
1004 r = radeon_gem_init(rdev);
1005 if (r)
1006 return r;
721604a1 1007 /* initialize vm here */
36ff39c4 1008 mutex_init(&rdev->vm_manager.lock);
23d4f1f2
AD
1009 /* Adjust VM size here.
1010 * Currently set to 4GB ((1 << 20) 4k pages).
1011 * Max GPUVM size for cayman and SI is 40 bits.
1012 */
721604a1
JG
1013 rdev->vm_manager.max_pfn = 1 << 20;
1014 INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);
771fe6b9 1015
4aac0473
JG
1016 /* Set asic functions */
1017 r = radeon_asic_init(rdev);
36421338 1018 if (r)
4aac0473 1019 return r;
36421338 1020 radeon_check_arguments(rdev);
4aac0473 1021
f95df9ca
AD
1022 /* all of the newer IGP chips have an internal gart
1023 * However some rs4xx report as AGP, so remove that here.
1024 */
1025 if ((rdev->family >= CHIP_RS400) &&
1026 (rdev->flags & RADEON_IS_IGP)) {
1027 rdev->flags &= ~RADEON_IS_AGP;
1028 }
1029
30256a3f 1030 if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
b574f251 1031 radeon_agp_disable(rdev);
771fe6b9
JG
1032 }
1033
ad49f501
DA
1034 /* set DMA mask + need_dma32 flags.
1035 * PCIE - can handle 40-bits.
005a83f1 1036 * IGP - can handle 40-bits
ad49f501 1037 * AGP - generally dma32 is safest
005a83f1 1038 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
ad49f501
DA
1039 */
1040 rdev->need_dma32 = false;
1041 if (rdev->flags & RADEON_IS_AGP)
1042 rdev->need_dma32 = true;
005a83f1 1043 if ((rdev->flags & RADEON_IS_PCI) &&
4a2b6662 1044 (rdev->family <= CHIP_RS740))
ad49f501
DA
1045 rdev->need_dma32 = true;
1046
1047 dma_bits = rdev->need_dma32 ? 32 : 40;
1048 r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
771fe6b9 1049 if (r) {
62fff811 1050 rdev->need_dma32 = true;
c52494f6 1051 dma_bits = 32;
771fe6b9
JG
1052 printk(KERN_WARNING "radeon: No suitable DMA available.\n");
1053 }
c52494f6
KRW
1054 r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
1055 if (r) {
1056 pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
1057 printk(KERN_WARNING "radeon: No coherent DMA available.\n");
1058 }
771fe6b9
JG
1059
1060 /* Registers mapping */
1061 /* TODO: block userspace mapping of io register */
01d73a69
JC
1062 rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
1063 rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
771fe6b9
JG
1064 rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
1065 if (rdev->rmmio == NULL) {
1066 return -ENOMEM;
1067 }
1068 DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
1069 DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);
1070
351a52a2
AD
1071 /* io port mapping */
1072 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1073 if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
1074 rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
1075 rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
1076 break;
1077 }
1078 }
1079 if (rdev->rio_mem == NULL)
1080 DRM_ERROR("Unable to find PCI I/O BAR\n");
1081
28d52043 1082 /* if we have > 1 VGA cards, then disable the radeon VGA resources */
93239ea1
DA
1083 /* this will fail for cards that aren't VGA class devices, just
1084 * ignore it */
1085 vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
26ec685f 1086 vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops);
28d52043 1087
3ce0a23d 1088 r = radeon_init(rdev);
b574f251 1089 if (r)
3ce0a23d 1090 return r;
3ce0a23d 1091
04eb2206
CK
1092 r = radeon_ib_ring_tests(rdev);
1093 if (r)
1094 DRM_ERROR("ib ring test failed (%d).\n", r);
1095
b574f251
JG
1096 if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
1097 /* Acceleration not working on AGP card try again
1098 * with fallback to PCI or PCIE GART
1099 */
a2d07b74 1100 radeon_asic_reset(rdev);
b574f251
JG
1101 radeon_fini(rdev);
1102 radeon_agp_disable(rdev);
1103 r = radeon_init(rdev);
4aac0473
JG
1104 if (r)
1105 return r;
771fe6b9 1106 }
60a7e396 1107 if ((radeon_testing & 1)) {
ecc0b326
MD
1108 radeon_test_moves(rdev);
1109 }
60a7e396
CK
1110 if ((radeon_testing & 2)) {
1111 radeon_test_syncing(rdev);
1112 }
771fe6b9 1113 if (radeon_benchmarking) {
638dd7db 1114 radeon_benchmark(rdev, radeon_benchmarking);
771fe6b9 1115 }
6cf8a3f5 1116 return 0;
771fe6b9
JG
1117}
1118
4d8bf9ae
CK
1119static void radeon_debugfs_remove_files(struct radeon_device *rdev);
1120
0c195119
AD
1121/**
1122 * radeon_device_fini - tear down the driver
1123 *
1124 * @rdev: radeon_device pointer
1125 *
1126 * Tear down the driver info (all asics).
1127 * Called at driver shutdown.
1128 */
771fe6b9
JG
1129void radeon_device_fini(struct radeon_device *rdev)
1130{
771fe6b9
JG
1131 DRM_INFO("radeon: finishing device.\n");
1132 rdev->shutdown = true;
90aca4d2
JG
1133 /* evict vram memory */
1134 radeon_bo_evict_vram(rdev);
62a8ea3f 1135 radeon_fini(rdev);
6a9ee8af 1136 vga_switcheroo_unregister_client(rdev->pdev);
c1176d6f 1137 vga_client_register(rdev->pdev, NULL, NULL, NULL);
e0a2ca73
AD
1138 if (rdev->rio_mem)
1139 pci_iounmap(rdev->pdev, rdev->rio_mem);
351a52a2 1140 rdev->rio_mem = NULL;
771fe6b9
JG
1141 iounmap(rdev->rmmio);
1142 rdev->rmmio = NULL;
4d8bf9ae 1143 radeon_debugfs_remove_files(rdev);
771fe6b9
JG
1144}
1145
1146
1147/*
1148 * Suspend & resume.
1149 */
0c195119
AD
1150/**
1151 * radeon_suspend_kms - initiate device suspend
1152 *
1153 * @pdev: drm dev pointer
1154 * @state: suspend state
1155 *
1156 * Puts the hw in the suspend state (all asics).
1157 * Returns 0 for success or an error on failure.
1158 * Called at driver suspend.
1159 */
771fe6b9
JG
1160int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
1161{
875c1866 1162 struct radeon_device *rdev;
771fe6b9 1163 struct drm_crtc *crtc;
d8dcaa1d 1164 struct drm_connector *connector;
7465280c 1165 int i, r;
771fe6b9 1166
875c1866 1167 if (dev == NULL || dev->dev_private == NULL) {
771fe6b9
JG
1168 return -ENODEV;
1169 }
1170 if (state.event == PM_EVENT_PRETHAW) {
1171 return 0;
1172 }
875c1866
DJ
1173 rdev = dev->dev_private;
1174
5bcf719b 1175 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
6a9ee8af 1176 return 0;
d8dcaa1d 1177
86698c20
SF
1178 drm_kms_helper_poll_disable(dev);
1179
d8dcaa1d
AD
1180 /* turn off display hw */
1181 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1182 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
1183 }
1184
771fe6b9
JG
1185 /* unpin the front buffers */
1186 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1187 struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
4c788679 1188 struct radeon_bo *robj;
771fe6b9
JG
1189
1190 if (rfb == NULL || rfb->obj == NULL) {
1191 continue;
1192 }
7e4d15d9 1193 robj = gem_to_radeon_bo(rfb->obj);
38651674
DA
1194 /* don't unpin kernel fb objects */
1195 if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
4c788679 1196 r = radeon_bo_reserve(robj, false);
38651674 1197 if (r == 0) {
4c788679
JG
1198 radeon_bo_unpin(robj);
1199 radeon_bo_unreserve(robj);
1200 }
771fe6b9
JG
1201 }
1202 }
1203 /* evict vram memory */
4c788679 1204 radeon_bo_evict_vram(rdev);
8a47cc9e
CK
1205
1206 mutex_lock(&rdev->ring_lock);
771fe6b9 1207 /* wait for gpu to finish processing current batch */
7465280c 1208 for (i = 0; i < RADEON_NUM_RINGS; i++)
8a47cc9e
CK
1209 radeon_fence_wait_empty_locked(rdev, i);
1210 mutex_unlock(&rdev->ring_lock);
771fe6b9 1211
f657c2a7
YZ
1212 radeon_save_bios_scratch_regs(rdev);
1213
ce8f5370 1214 radeon_pm_suspend(rdev);
62a8ea3f 1215 radeon_suspend(rdev);
d4877cf2 1216 radeon_hpd_fini(rdev);
771fe6b9 1217 /* evict remaining vram memory */
4c788679 1218 radeon_bo_evict_vram(rdev);
771fe6b9 1219
10b06122
JG
1220 radeon_agp_suspend(rdev);
1221
771fe6b9
JG
1222 pci_save_state(dev->pdev);
1223 if (state.event == PM_EVENT_SUSPEND) {
1224 /* Shut down the device */
1225 pci_disable_device(dev->pdev);
1226 pci_set_power_state(dev->pdev, PCI_D3hot);
1227 }
ac751efa 1228 console_lock();
38651674 1229 radeon_fbdev_set_suspend(rdev, 1);
ac751efa 1230 console_unlock();
771fe6b9
JG
1231 return 0;
1232}
1233
0c195119
AD
1234/**
1235 * radeon_resume_kms - initiate device resume
1236 *
1237 * @pdev: drm dev pointer
1238 *
1239 * Bring the hw back to operating state (all asics).
1240 * Returns 0 for success or an error on failure.
1241 * Called at driver resume.
1242 */
771fe6b9
JG
1243int radeon_resume_kms(struct drm_device *dev)
1244{
09bdf591 1245 struct drm_connector *connector;
771fe6b9 1246 struct radeon_device *rdev = dev->dev_private;
04eb2206 1247 int r;
771fe6b9 1248
5bcf719b 1249 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
6a9ee8af
DA
1250 return 0;
1251
ac751efa 1252 console_lock();
771fe6b9
JG
1253 pci_set_power_state(dev->pdev, PCI_D0);
1254 pci_restore_state(dev->pdev);
1255 if (pci_enable_device(dev->pdev)) {
ac751efa 1256 console_unlock();
771fe6b9
JG
1257 return -1;
1258 }
0ebf1717
DA
1259 /* resume AGP if in use */
1260 radeon_agp_resume(rdev);
62a8ea3f 1261 radeon_resume(rdev);
04eb2206
CK
1262
1263 r = radeon_ib_ring_tests(rdev);
1264 if (r)
1265 DRM_ERROR("ib ring test failed (%d).\n", r);
1266
ce8f5370 1267 radeon_pm_resume(rdev);
f657c2a7 1268 radeon_restore_bios_scratch_regs(rdev);
09bdf591 1269
38651674 1270 radeon_fbdev_set_suspend(rdev, 0);
ac751efa 1271 console_unlock();
771fe6b9 1272
3fa47d9e
AD
1273 /* init dig PHYs, disp eng pll */
1274 if (rdev->is_atom_bios) {
ac89af1e 1275 radeon_atom_encoder_init(rdev);
f3f1f03e 1276 radeon_atom_disp_eng_pll_init(rdev);
bced76f2
AD
1277 /* turn on the BL */
1278 if (rdev->mode_info.bl_encoder) {
1279 u8 bl_level = radeon_get_backlight_level(rdev,
1280 rdev->mode_info.bl_encoder);
1281 radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
1282 bl_level);
1283 }
3fa47d9e 1284 }
d4877cf2
AD
1285 /* reset hpd state */
1286 radeon_hpd_init(rdev);
771fe6b9
JG
1287 /* blat the mode back in */
1288 drm_helper_resume_force_mode(dev);
a93f344d
AD
1289 /* turn on display hw */
1290 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1291 drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
1292 }
86698c20
SF
1293
1294 drm_kms_helper_poll_enable(dev);
771fe6b9
JG
1295 return 0;
1296}
1297
0c195119
AD
1298/**
1299 * radeon_gpu_reset - reset the asic
1300 *
1301 * @rdev: radeon device pointer
1302 *
1303 * Attempt the reset the GPU if it has hung (all asics).
1304 * Returns 0 for success or an error on failure.
1305 */
90aca4d2
JG
1306int radeon_gpu_reset(struct radeon_device *rdev)
1307{
55d7c221
CK
1308 unsigned ring_sizes[RADEON_NUM_RINGS];
1309 uint32_t *ring_data[RADEON_NUM_RINGS];
1310
1311 bool saved = false;
1312
1313 int i, r;
8fd1b84c 1314 int resched;
90aca4d2 1315
dee53e7f 1316 down_write(&rdev->exclusive_lock);
90aca4d2 1317 radeon_save_bios_scratch_regs(rdev);
8fd1b84c
DA
1318 /* block TTM */
1319 resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
90aca4d2
JG
1320 radeon_suspend(rdev);
1321
55d7c221
CK
1322 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1323 ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
1324 &ring_data[i]);
1325 if (ring_sizes[i]) {
1326 saved = true;
1327 dev_info(rdev->dev, "Saved %d dwords of commands "
1328 "on ring %d.\n", ring_sizes[i], i);
1329 }
1330 }
1331
1332retry:
90aca4d2
JG
1333 r = radeon_asic_reset(rdev);
1334 if (!r) {
55d7c221 1335 dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
90aca4d2 1336 radeon_resume(rdev);
55d7c221 1337 }
04eb2206 1338
55d7c221
CK
1339 radeon_restore_bios_scratch_regs(rdev);
1340 drm_helper_resume_force_mode(rdev->ddev);
04eb2206 1341
55d7c221
CK
1342 if (!r) {
1343 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1344 radeon_ring_restore(rdev, &rdev->ring[i],
1345 ring_sizes[i], ring_data[i]);
f54b350d
CK
1346 ring_sizes[i] = 0;
1347 ring_data[i] = NULL;
55d7c221
CK
1348 }
1349
1350 r = radeon_ib_ring_tests(rdev);
1351 if (r) {
1352 dev_err(rdev->dev, "ib ring test failed (%d).\n", r);
1353 if (saved) {
f54b350d 1354 saved = false;
55d7c221
CK
1355 radeon_suspend(rdev);
1356 goto retry;
1357 }
1358 }
1359 } else {
1360 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1361 kfree(ring_data[i]);
1362 }
90aca4d2 1363 }
7a1619b9 1364
55d7c221 1365 ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
7a1619b9
MD
1366 if (r) {
1367 /* bad news, how to tell it to userspace ? */
1368 dev_info(rdev->dev, "GPU reset failed\n");
1369 }
1370
dee53e7f 1371 up_write(&rdev->exclusive_lock);
90aca4d2
JG
1372 return r;
1373}
1374
771fe6b9
JG
1375
1376/*
1377 * Debugfs
1378 */
771fe6b9
JG
1379int radeon_debugfs_add_files(struct radeon_device *rdev,
1380 struct drm_info_list *files,
1381 unsigned nfiles)
1382{
1383 unsigned i;
1384
4d8bf9ae
CK
1385 for (i = 0; i < rdev->debugfs_count; i++) {
1386 if (rdev->debugfs[i].files == files) {
771fe6b9
JG
1387 /* Already registered */
1388 return 0;
1389 }
1390 }
c245cb9e 1391
4d8bf9ae 1392 i = rdev->debugfs_count + 1;
c245cb9e
MW
1393 if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
1394 DRM_ERROR("Reached maximum number of debugfs components.\n");
1395 DRM_ERROR("Report so we increase "
1396 "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
771fe6b9
JG
1397 return -EINVAL;
1398 }
4d8bf9ae
CK
1399 rdev->debugfs[rdev->debugfs_count].files = files;
1400 rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
1401 rdev->debugfs_count = i;
771fe6b9
JG
1402#if defined(CONFIG_DEBUG_FS)
1403 drm_debugfs_create_files(files, nfiles,
1404 rdev->ddev->control->debugfs_root,
1405 rdev->ddev->control);
1406 drm_debugfs_create_files(files, nfiles,
1407 rdev->ddev->primary->debugfs_root,
1408 rdev->ddev->primary);
1409#endif
1410 return 0;
1411}
1412
/* Unregister every debugfs file table previously added via
 * radeon_debugfs_add_files() from both the control and primary minors.
 * No-op when debugfs support is compiled out.
 */
static void radeon_debugfs_remove_files(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < rdev->debugfs_count; i++) {
		drm_debugfs_remove_files(rdev->debugfs[i].files,
					 rdev->debugfs[i].num_files,
					 rdev->ddev->control);
		drm_debugfs_remove_files(rdev->debugfs[i].files,
					 rdev->debugfs[i].num_files,
					 rdev->ddev->primary);
	}
#endif
}
1428
#if defined(CONFIG_DEBUG_FS)
/* Per-minor debugfs init hook required by the DRM core; nothing to do. */
int radeon_debugfs_init(struct drm_minor *minor)
{
	return 0;
}

/* Per-minor debugfs cleanup hook required by the DRM core; nothing to do. */
void radeon_debugfs_cleanup(struct drm_minor *minor)
{
}
#endif
This page took 0.295356 seconds and 5 git commands to generate.