Merge tag 'pci-v3.15-changes' of git://git.kernel.org/pub/scm/linux/kernel/git/helgaa...
[deliverable/linux.git] / drivers / gpu / drm / radeon / radeon_device.c
CommitLineData
771fe6b9
JG
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include <linux/console.h>
5a0e3ad6 29#include <linux/slab.h>
771fe6b9
JG
30#include <drm/drmP.h>
31#include <drm/drm_crtc_helper.h>
32#include <drm/radeon_drm.h>
28d52043 33#include <linux/vgaarb.h>
6a9ee8af 34#include <linux/vga_switcheroo.h>
bcc65fd8 35#include <linux/efi.h>
771fe6b9
JG
36#include "radeon_reg.h"
37#include "radeon.h"
771fe6b9
JG
38#include "atom.h"
39
/* Human-readable name for each supported ASIC family, indexed by the
 * radeon family enum.  The terminating entry is "LAST".
 */
static const char radeon_family_name[][16] = {
	"R100", "RV100", "RS100", "RV200", "RS200",
	"R200", "RV250", "RS300", "RV280",
	"R300", "R350", "RV350", "RV380",
	"R420", "R423", "RV410",
	"RS400", "RS480", "RS600", "RS690", "RS740",
	"RV515", "R520", "RV530", "RV560", "RV570", "R580",
	"R600", "RV610", "RV630", "RV670", "RV620", "RV635",
	"RS780", "RS880",
	"RV770", "RV730", "RV710", "RV740",
	"CEDAR", "REDWOOD", "JUNIPER", "CYPRESS", "HEMLOCK",
	"PALM", "SUMO", "SUMO2",
	"BARTS", "TURKS", "CAICOS", "CAYMAN", "ARUBA",
	"TAHITI", "PITCAIRN", "VERDE", "OLAND", "HAINAN",
	"BONAIRE", "KAVERI", "KABINI", "HAWAII",
	"LAST",
};
104
/* radeon_is_px - report whether the device is a PX dGPU (presumably
 * PowerXpress hybrid graphics - confirm).  The real implementation lives
 * in the vga_switcheroo support code; without CONFIG_VGA_SWITCHEROO such
 * a configuration cannot be driven, so stub it out to false.
 */
#if defined(CONFIG_VGA_SWITCHEROO)
bool radeon_is_px(void);
#else
static inline bool radeon_is_px(void) { return false; }
#endif
2e1b65f9
AD
111/**
112 * radeon_program_register_sequence - program an array of registers.
113 *
114 * @rdev: radeon_device pointer
115 * @registers: pointer to the register array
116 * @array_size: size of the register array
117 *
118 * Programs an array or registers with and and or masks.
119 * This is a helper for setting golden registers.
120 */
121void radeon_program_register_sequence(struct radeon_device *rdev,
122 const u32 *registers,
123 const u32 array_size)
124{
125 u32 tmp, reg, and_mask, or_mask;
126 int i;
127
128 if (array_size % 3)
129 return;
130
131 for (i = 0; i < array_size; i +=3) {
132 reg = registers[i + 0];
133 and_mask = registers[i + 1];
134 or_mask = registers[i + 2];
135
136 if (and_mask == 0xffffffff) {
137 tmp = or_mask;
138 } else {
139 tmp = RREG32(reg);
140 tmp &= ~and_mask;
141 tmp |= or_mask;
142 }
143 WREG32(reg, tmp);
144 }
145}
146
/**
 * radeon_pci_config_reset - reset the asic via PCI config space
 *
 * @rdev: radeon_device pointer
 *
 * Writes the reset magic value (RADEON_ASIC_RESET_DATA) to offset 0x7c
 * of the device's PCI configuration space to trigger an asic reset.
 */
void radeon_pci_config_reset(struct radeon_device *rdev)
{
	pci_write_config_dword(rdev->pdev, 0x7c, RADEON_ASIC_RESET_DATA);
}
151
0c195119
AD
152/**
153 * radeon_surface_init - Clear GPU surface registers.
154 *
155 * @rdev: radeon_device pointer
156 *
157 * Clear GPU surface registers (r1xx-r5xx).
b1e3a6d1 158 */
3ce0a23d 159void radeon_surface_init(struct radeon_device *rdev)
b1e3a6d1
MD
160{
161 /* FIXME: check this out */
162 if (rdev->family < CHIP_R600) {
163 int i;
164
550e2d92
DA
165 for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
166 if (rdev->surface_regs[i].bo)
167 radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
168 else
169 radeon_clear_surface_reg(rdev, i);
b1e3a6d1 170 }
e024e110
DA
171 /* enable surfaces */
172 WREG32(RADEON_SURFACE_CNTL, 0);
b1e3a6d1
MD
173 }
174}
175
771fe6b9
JG
176/*
177 * GPU scratch registers helpers function.
178 */
0c195119
AD
179/**
180 * radeon_scratch_init - Init scratch register driver information.
181 *
182 * @rdev: radeon_device pointer
183 *
184 * Init CP scratch register driver information (r1xx-r5xx)
185 */
3ce0a23d 186void radeon_scratch_init(struct radeon_device *rdev)
771fe6b9
JG
187{
188 int i;
189
190 /* FIXME: check this out */
191 if (rdev->family < CHIP_R300) {
192 rdev->scratch.num_reg = 5;
193 } else {
194 rdev->scratch.num_reg = 7;
195 }
724c80e1 196 rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
771fe6b9
JG
197 for (i = 0; i < rdev->scratch.num_reg; i++) {
198 rdev->scratch.free[i] = true;
724c80e1 199 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
771fe6b9
JG
200 }
201}
202
0c195119
AD
203/**
204 * radeon_scratch_get - Allocate a scratch register
205 *
206 * @rdev: radeon_device pointer
207 * @reg: scratch register mmio offset
208 *
209 * Allocate a CP scratch register for use by the driver (all asics).
210 * Returns 0 on success or -EINVAL on failure.
211 */
771fe6b9
JG
212int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
213{
214 int i;
215
216 for (i = 0; i < rdev->scratch.num_reg; i++) {
217 if (rdev->scratch.free[i]) {
218 rdev->scratch.free[i] = false;
219 *reg = rdev->scratch.reg[i];
220 return 0;
221 }
222 }
223 return -EINVAL;
224}
225
0c195119
AD
226/**
227 * radeon_scratch_free - Free a scratch register
228 *
229 * @rdev: radeon_device pointer
230 * @reg: scratch register mmio offset
231 *
232 * Free a CP scratch register allocated for use by the driver (all asics)
233 */
771fe6b9
JG
234void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
235{
236 int i;
237
238 for (i = 0; i < rdev->scratch.num_reg; i++) {
239 if (rdev->scratch.reg[i] == reg) {
240 rdev->scratch.free[i] = true;
241 return;
242 }
243 }
244}
245
/*
 * GPU doorbell aperture helpers function.
 */
/**
 * radeon_doorbell_init - Init doorbell driver information.
 *
 * @rdev: radeon_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
static int radeon_doorbell_init(struct radeon_device *rdev)
{
	/* doorbell bar mapping */
	rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
	rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);

	/* one doorbell per 32-bit slot in the BAR, capped at RADEON_MAX_DOORBELLS */
	rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS);
	if (rdev->doorbell.num_doorbells == 0)
		return -EINVAL;

	/* map only the slots we track, not necessarily the whole BAR */
	rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32));
	if (rdev->doorbell.ptr == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
	DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);

	/* mark every doorbell as unused */
	memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used));

	return 0;
}
278
/**
 * radeon_doorbell_fini - Tear down doorbell driver information.
 *
 * @rdev: radeon_device pointer
 *
 * Tear down doorbell driver information (CIK).
 * Unmaps the mapping created by radeon_doorbell_init().
 */
static void radeon_doorbell_fini(struct radeon_device *rdev)
{
	iounmap(rdev->doorbell.ptr);
	rdev->doorbell.ptr = NULL;
}
291
292/**
d5754ab8 293 * radeon_doorbell_get - Allocate a doorbell entry
75efdee1
AD
294 *
295 * @rdev: radeon_device pointer
d5754ab8 296 * @doorbell: doorbell index
75efdee1 297 *
d5754ab8 298 * Allocate a doorbell for use by the driver (all asics).
75efdee1
AD
299 * Returns 0 on success or -EINVAL on failure.
300 */
301int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
302{
d5754ab8
AL
303 unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells);
304 if (offset < rdev->doorbell.num_doorbells) {
305 __set_bit(offset, rdev->doorbell.used);
306 *doorbell = offset;
307 return 0;
308 } else {
309 return -EINVAL;
75efdee1 310 }
75efdee1
AD
311}
312
313/**
d5754ab8 314 * radeon_doorbell_free - Free a doorbell entry
75efdee1
AD
315 *
316 * @rdev: radeon_device pointer
d5754ab8 317 * @doorbell: doorbell index
75efdee1 318 *
d5754ab8 319 * Free a doorbell allocated for use by the driver (all asics)
75efdee1
AD
320 */
321void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
322{
d5754ab8
AL
323 if (doorbell < rdev->doorbell.num_doorbells)
324 __clear_bit(doorbell, rdev->doorbell.used);
75efdee1
AD
325}
326
/*
 * radeon_wb_*()
 * Writeback is the method by which the GPU updates special pages
 * in memory with the status of certain GPU events (fences, ring pointers,
 * etc.).
 */

/**
 * radeon_wb_disable - Disable Writeback
 *
 * @rdev: radeon_device pointer
 *
 * Disables Writeback (all asics). Used for suspend.
 * Only flips the driver-side flag; no hardware programming happens here.
 */
void radeon_wb_disable(struct radeon_device *rdev)
{
	rdev->wb.enabled = false;
}
345
/**
 * radeon_wb_fini - Disable Writeback and free memory
 *
 * @rdev: radeon_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
void radeon_wb_fini(struct radeon_device *rdev)
{
	radeon_wb_disable(rdev);
	if (rdev->wb.wb_obj) {
		/* kunmap/unpin require the bo to be reserved; if the
		 * reserve fails we still drop our reference below */
		if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
			radeon_bo_kunmap(rdev->wb.wb_obj);
			radeon_bo_unpin(rdev->wb.wb_obj);
			radeon_bo_unreserve(rdev->wb.wb_obj);
		}
		radeon_bo_unref(&rdev->wb.wb_obj);
		rdev->wb.wb = NULL;
		rdev->wb.wb_obj = NULL;
	}
}
368
/**
 * radeon_wb_init- Init Writeback driver info and allocate memory
 *
 * @rdev: radeon_device pointer
 *
 * Initializes writeback: allocates and maps the writeback buffer
 * (if not already allocated) and decides whether writeback and
 * event_write fences can be used on this asic (all asics).
 * Used at driver startup.
 * Returns 0 on success or an -error on failure.
 */
int radeon_wb_init(struct radeon_device *rdev)
{
	int r;

	/* allocate the writeback bo once; reused across resets/resumes */
	if (rdev->wb.wb_obj == NULL) {
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, NULL, &rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0)) {
			radeon_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
				  &rdev->wb.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->wb.wb_obj);
			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
			radeon_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
		radeon_bo_unreserve(rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
			radeon_wb_fini(rdev);
			return r;
		}
	}

	/* clear wb memory */
	memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
	/* disable event_write fences */
	rdev->wb.use_event = false;
	/* disabled via module param */
	if (radeon_no_wb == 1) {
		rdev->wb.enabled = false;
	} else {
		if (rdev->flags & RADEON_IS_AGP) {
			/* often unreliable on AGP */
			rdev->wb.enabled = false;
		} else if (rdev->family < CHIP_R300) {
			/* often unreliable on pre-r300 */
			rdev->wb.enabled = false;
		} else {
			rdev->wb.enabled = true;
			/* event_write fences are only available on r600+ */
			if (rdev->family >= CHIP_R600) {
				rdev->wb.use_event = true;
			}
		}
	}
	/* always use writeback/events on NI, APUs */
	if (rdev->family >= CHIP_PALM) {
		rdev->wb.enabled = true;
		rdev->wb.use_event = true;
	}

	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");

	return 0;
}
443
/**
 * radeon_vram_location - try to find VRAM location
 * @rdev: radeon device structure holding all necessary informations
 * @mc: memory controller structure holding memory informations
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as parameter (which is so far either PCI aperture address or
 * for IGP TOM base address).
 *
 * If there is not enough space to fit the invisible VRAM in the 32bits
 * address space then we limit the VRAM size to the aperture.
 *
 * If we are using AGP and if the AGP aperture doesn't allow us to have
 * room for all the VRAM then we restrict the VRAM to the PCI aperture
 * size and print a warning.
 *
 * This function will never fail, worst case is limiting VRAM.
 *
 * Note: GTT start, end, size should be initialized before calling this
 * function on AGP platform.
 *
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size as on some board we need to program the mc to
 * cover the whole aperture even if VRAM size is inferior to aperture size
 * Novell bug 204882 + along with lots of ubuntu ones
 *
 * Note: when limiting vram it's safe to overwrite real_vram_size because
 * we are not in case where real_vram_size is inferior to mc_vram_size (ie
 * not affected by bogus hw of Novell bug 204882 + along with lots of ubuntu
 * ones)
 *
 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
	/* optional user cap on VRAM, from the radeon_vram_limit module
	 * parameter, converted from MB to bytes */
	uint64_t limit = (uint64_t)radeon_vram_limit << 20;

	mc->vram_start = base;
	if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	/* on AGP, shrink VRAM if it would overlap the already-placed GTT */
	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
			mc->mc_vram_size >> 20, mc->vram_start,
			mc->vram_end, mc->real_vram_size >> 20);
}
771fe6b9 508
/**
 * radeon_gtt_location - try to find GTT location
 * @rdev: radeon device structure holding all necessary informations
 * @mc: memory controller structure holding memory informations
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If GTT size is bigger than space left then we adjust GTT size.
 * Thus this function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_af, size_bf;

	/* space available after (af) and before (bf) the VRAM range,
	 * both adjusted to gtt_base_align */
	size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		/* place GTT below VRAM, growing downward from vram_start */
		if (mc->gtt_size > size_bf) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
	} else {
		/* place GTT above VRAM, aligned up past vram_end */
		if (mc->gtt_size > size_af) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}
544
/*
 * GPU helpers function.
 */
/**
 * radeon_card_posted - check if the hw has already been initialized
 *
 * @rdev: radeon_device pointer
 *
 * Check if the asic has been initialized (all asics), by looking at
 * CRTC enable bits and, failing that, the programmed memory size.
 * Used at driver startup.
 * Returns true if initialized or false if not.
 */
bool radeon_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* required for EFI mode on macbook2,1 which uses an r5xx asic */
	if (efi_enabled(EFI_BOOT) &&
	    (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
	    (rdev->family < CHIP_R600))
		return false;

	/* no display engine: CRTC checks don't apply */
	if (ASIC_IS_NODCE(rdev))
		goto check_memsize;

	/* first check CRTCs */
	if (ASIC_IS_DCE4(rdev)) {
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
		if (rdev->num_crtc >= 4) {
			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
				RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
		}
		if (rdev->num_crtc >= 6) {
			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
				RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
		}
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_AVIVO(rdev)) {
		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
			RREG32(AVIVO_D2CRTC_CONTROL);
		if (reg & AVIVO_CRTC_EN) {
			return true;
		}
	} else {
		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
			RREG32(RADEON_CRTC2_GEN_CNTL);
		if (reg & RADEON_CRTC_EN) {
			return true;
		}
	}

check_memsize:
	/* then check MEM_SIZE, in case the crtcs are off */
	if (rdev->family >= CHIP_R600)
		reg = RREG32(R600_CONFIG_MEMSIZE);
	else
		reg = RREG32(RADEON_CONFIG_MEMSIZE);

	if (reg)
		return true;

	return false;

}
611
/**
 * radeon_update_bandwidth_info - update display bandwidth params
 *
 * @rdev: radeon_device pointer
 *
 * Used when sclk/mclk are switched or display modes are set.
 * params are used to calculate display watermarks (all asics)
 */
void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
	fixed20_12 a;
	u32 sclk = rdev->pm.current_sclk;
	u32 mclk = rdev->pm.current_mclk;

	/* sclk/mclk in Mhz */
	/* divide by 100 converts the raw clock values to MHz —
	 * NOTE(review): assumes current_sclk/mclk are in 10 kHz units; confirm */
	a.full = dfixed_const(100);
	rdev->pm.sclk.full = dfixed_const(sclk);
	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
	rdev->pm.mclk.full = dfixed_const(mclk);
	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);

	if (rdev->flags & RADEON_IS_IGP) {
		a.full = dfixed_const(16);
		/* core_bandwidth = sclk(Mhz) * 16 */
		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
	}
}
639
0c195119
AD
640/**
641 * radeon_boot_test_post_card - check and possibly initialize the hw
642 *
643 * @rdev: radeon_device pointer
644 *
645 * Check if the asic is initialized and if not, attempt to initialize
646 * it (all asics).
647 * Returns true if initialized or false if not.
648 */
72542d77
DA
649bool radeon_boot_test_post_card(struct radeon_device *rdev)
650{
651 if (radeon_card_posted(rdev))
652 return true;
653
654 if (rdev->bios) {
655 DRM_INFO("GPU not posted. posting now...\n");
656 if (rdev->is_atom_bios)
657 atom_asic_init(rdev->mode_info.atom_context);
658 else
659 radeon_combios_asic_init(rdev->ddev);
660 return true;
661 } else {
662 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
663 return false;
664 }
665}
666
/**
 * radeon_dummy_page_init - init dummy page used by the driver
 *
 * @rdev: radeon_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for gart entries
 * when pages are taken out of the GART.
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_dummy_page_init(struct radeon_device *rdev)
{
	/* already allocated - nothing to do */
	if (rdev->dummy_page.page)
		return 0;
	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (rdev->dummy_page.page == NULL)
		return -ENOMEM;
	rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
					0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
		dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(rdev->dummy_page.page);
		rdev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	return 0;
}
694
0c195119
AD
695/**
696 * radeon_dummy_page_fini - free dummy page used by the driver
697 *
698 * @rdev: radeon_device pointer
699 *
700 * Frees the dummy page used by the driver (all asics).
701 */
3ce0a23d
JG
702void radeon_dummy_page_fini(struct radeon_device *rdev)
703{
704 if (rdev->dummy_page.page == NULL)
705 return;
706 pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
707 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
708 __free_page(rdev->dummy_page.page);
709 rdev->dummy_page.page = NULL;
710}
711
771fe6b9 712
771fe6b9 713/* ATOM accessor methods */
0c195119
AD
714/*
715 * ATOM is an interpreted byte code stored in tables in the vbios. The
716 * driver registers callbacks to access registers and the interpreter
717 * in the driver parses the tables and executes then to program specific
718 * actions (set display modes, asic init, etc.). See radeon_atombios.c,
719 * atombios.h, and atom.c
720 */
721
722/**
723 * cail_pll_read - read PLL register
724 *
725 * @info: atom card_info pointer
726 * @reg: PLL register offset
727 *
728 * Provides a PLL register accessor for the atom interpreter (r4xx+).
729 * Returns the value of the PLL register.
730 */
771fe6b9
JG
731static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
732{
733 struct radeon_device *rdev = info->dev->dev_private;
734 uint32_t r;
735
736 r = rdev->pll_rreg(rdev, reg);
737 return r;
738}
739
/**
 * cail_pll_write - write PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 * @val: value to write to the pll register
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 * Forwards to the per-asic pll_wreg callback.
 */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->pll_wreg(rdev, reg, val);
}
755
0c195119
AD
756/**
757 * cail_mc_read - read MC (Memory Controller) register
758 *
759 * @info: atom card_info pointer
760 * @reg: MC register offset
761 *
762 * Provides an MC register accessor for the atom interpreter (r4xx+).
763 * Returns the value of the MC register.
764 */
771fe6b9
JG
765static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
766{
767 struct radeon_device *rdev = info->dev->dev_private;
768 uint32_t r;
769
770 r = rdev->mc_rreg(rdev, reg);
771 return r;
772}
773
/**
 * cail_mc_write - write MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 * @val: value to write to the MC register
 *
 * Provides a MC register accessor for the atom interpreter (r4xx+).
 * Forwards to the per-asic mc_wreg callback.
 */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->mc_wreg(rdev, reg, val);
}
789
/**
 * cail_reg_write - write MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset, in dwords (converted to a byte offset here)
 * @val: value to write to the register
 *
 * Provides a MMIO register accessor for the atom interpreter (r4xx+).
 */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32(reg*4, val);
}
805
0c195119
AD
806/**
807 * cail_reg_read - read MMIO register
808 *
809 * @info: atom card_info pointer
810 * @reg: MMIO register offset
811 *
812 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
813 * Returns the value of the MMIO register.
814 */
771fe6b9
JG
815static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
816{
817 struct radeon_device *rdev = info->dev->dev_private;
818 uint32_t r;
819
820 r = RREG32(reg*4);
821 return r;
822}
823
/**
 * cail_ioreg_write - write IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset, in dwords (converted to a byte offset here)
 * @val: value to write to the register
 *
 * Provides a IO register accessor for the atom interpreter (r4xx+).
 */
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32_IO(reg*4, val);
}
839
0c195119
AD
840/**
841 * cail_ioreg_read - read IO register
842 *
843 * @info: atom card_info pointer
844 * @reg: IO register offset
845 *
846 * Provides an IO register accessor for the atom interpreter (r4xx+).
847 * Returns the value of the IO register.
848 */
351a52a2
AD
849static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
850{
851 struct radeon_device *rdev = info->dev->dev_private;
852 uint32_t r;
853
854 r = RREG32_IO(reg*4);
855 return r;
856}
857
/**
 * radeon_atombios_init - init the driver info and callbacks for atombios
 *
 * @rdev: radeon_device pointer
 *
 * Initializes the driver info and register access callbacks for the
 * ATOM interpreter (r4xx+).
 * Returns 0 on success, -ENOMEM on failure.
 * Called at driver startup.
 */
int radeon_atombios_init(struct radeon_device *rdev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	/* card_info ownership passes to mode_info; freed in
	 * radeon_atombios_fini() */
	rdev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = rdev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops */
	if (rdev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		/* no I/O BAR: fall back to the MMIO accessors */
		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
	if (!rdev->mode_info.atom_context) {
		/* fini also releases atom_card_info allocated above */
		radeon_atombios_fini(rdev);
		return -ENOMEM;
	}

	mutex_init(&rdev->mode_info.atom_context->mutex);
	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
	return 0;
}
905
/**
 * radeon_atombios_fini - free the driver info and callbacks for atombios
 *
 * @rdev: radeon_device pointer
 *
 * Frees the driver info and register access callbacks for the ATOM
 * interpreter (r4xx+). Safe to call with a partially-initialized
 * state (NULL atom_context).
 * Called at driver shutdown.
 */
void radeon_atombios_fini(struct radeon_device *rdev)
{
	if (rdev->mode_info.atom_context) {
		/* scratch buffer allocated by atom_allocate_fb_scratch() */
		kfree(rdev->mode_info.atom_context->scratch);
	}
	kfree(rdev->mode_info.atom_context);
	rdev->mode_info.atom_context = NULL;
	kfree(rdev->mode_info.atom_card_info);
	rdev->mode_info.atom_card_info = NULL;
}
925
/* COMBIOS */
/*
 * COMBIOS is the bios format prior to ATOM. It provides
 * command tables similar to ATOM, but doesn't have a unified
 * parser. See radeon_combios.c
 */

/**
 * radeon_combios_init - init the driver info for combios
 *
 * @rdev: radeon_device pointer
 *
 * Initializes the driver info for combios (r1xx-r3xx).
 * Returns 0 on success.
 * Called at driver startup.
 */
int radeon_combios_init(struct radeon_device *rdev)
{
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
	return 0;
}
947
/**
 * radeon_combios_fini - free the driver info for combios
 *
 * @rdev: radeon_device pointer
 *
 * Frees the driver info for combios (r1xx-r3xx); nothing to do,
 * kept for symmetry with radeon_combios_init().
 * Called at driver shutdown.
 */
void radeon_combios_fini(struct radeon_device *rdev)
{
}
959
0c195119
AD
960/* if we get transitioned to only one device, take VGA back */
961/**
962 * radeon_vga_set_decode - enable/disable vga decode
963 *
964 * @cookie: radeon_device pointer
965 * @state: enable/disable vga decode
966 *
967 * Enable/disable vga decode (all asics).
968 * Returns VGA resource flags.
969 */
28d52043
DA
970static unsigned int radeon_vga_set_decode(void *cookie, bool state)
971{
972 struct radeon_device *rdev = cookie;
28d52043
DA
973 radeon_vga_set_state(rdev, state);
974 if (state)
975 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
976 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
977 else
978 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
979}
c1176d6f 980
/**
 * radeon_check_pot_argument - check that argument is a power of two
 *
 * @arg: value to check
 *
 * Validates that a certain argument is a power of two (all asics).
 * Note that, matching the original behavior, 0 is reported as valid.
 * Returns true if argument is valid.
 */
static bool radeon_check_pot_argument(int arg)
{
	return !(arg & (arg - 1));
}
993
0c195119
AD
994/**
995 * radeon_check_arguments - validate module params
996 *
997 * @rdev: radeon_device pointer
998 *
999 * Validates certain module parameters and updates
1000 * the associated values used by the driver (all asics).
1001 */
1109ca09 1002static void radeon_check_arguments(struct radeon_device *rdev)
36421338
JG
1003{
1004 /* vramlimit must be a power of two */
1bcb04f7 1005 if (!radeon_check_pot_argument(radeon_vram_limit)) {
36421338
JG
1006 dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
1007 radeon_vram_limit);
1008 radeon_vram_limit = 0;
36421338 1009 }
1bcb04f7 1010
edcd26e8
AD
1011 if (radeon_gart_size == -1) {
1012 /* default to a larger gart size on newer asics */
1013 if (rdev->family >= CHIP_RV770)
1014 radeon_gart_size = 1024;
1015 else
1016 radeon_gart_size = 512;
1017 }
36421338 1018 /* gtt size must be power of two and greater or equal to 32M */
1bcb04f7 1019 if (radeon_gart_size < 32) {
edcd26e8 1020 dev_warn(rdev->dev, "gart size (%d) too small\n",
36421338 1021 radeon_gart_size);
edcd26e8
AD
1022 if (rdev->family >= CHIP_RV770)
1023 radeon_gart_size = 1024;
1024 else
1025 radeon_gart_size = 512;
1bcb04f7 1026 } else if (!radeon_check_pot_argument(radeon_gart_size)) {
36421338
JG
1027 dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
1028 radeon_gart_size);
edcd26e8
AD
1029 if (rdev->family >= CHIP_RV770)
1030 radeon_gart_size = 1024;
1031 else
1032 radeon_gart_size = 512;
36421338 1033 }
1bcb04f7
CK
1034 rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
1035
36421338
JG
1036 /* AGP mode can only be -1, 1, 2, 4, 8 */
1037 switch (radeon_agpmode) {
1038 case -1:
1039 case 0:
1040 case 1:
1041 case 2:
1042 case 4:
1043 case 8:
1044 break;
1045 default:
1046 dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
1047 "-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
1048 radeon_agpmode = 0;
1049 break;
1050 }
1051}
1052
d1f9809e
ML
1053/**
1054 * radeon_switcheroo_quirk_long_wakeup - return true if longer d3 delay is
1055 * needed for waking up.
1056 *
1057 * @pdev: pci dev pointer
1058 */
1059static bool radeon_switcheroo_quirk_long_wakeup(struct pci_dev *pdev)
1060{
1061
1062 /* 6600m in a macbook pro */
1063 if (pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
1064 pdev->subsystem_device == 0x00e2) {
1065 printk(KERN_INFO "radeon: quirking longer d3 wakeup delay\n");
1066 return true;
1067 }
1068
1069 return false;
1070}
1071
0c195119
AD
/**
 * radeon_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes the
 * the asics before or after it is powered up using ACPI methods.
 */
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/* PX (runtime pm) handles the "off" transition itself; don't
	 * suspend the card a second time here */
	if (radeon_is_px() && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		/* remember the original d3 delay so the quirk below can be
		 * undone after resume */
		unsigned d3_delay = dev->pdev->d3_delay;

		printk(KERN_INFO "radeon: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		/* some boards need a longer D3 wakeup delay; only raise it,
		 * never lower it */
		if (d3_delay < 20 && radeon_switcheroo_quirk_long_wakeup(pdev))
			dev->pdev->d3_delay = 20;

		radeon_resume_kms(dev, true, true);

		/* restore the pre-quirk delay */
		dev->pdev->d3_delay = d3_delay;

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		printk(KERN_INFO "radeon: switched off\n");
		/* stop output polling before the hw goes away */
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		radeon_suspend_kms(dev, true, true);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}
1112
0c195119
AD
1113/**
1114 * radeon_switcheroo_can_switch - see if switcheroo state can change
1115 *
1116 * @pdev: pci dev pointer
1117 *
1118 * Callback for the switcheroo driver. Check of the switcheroo
1119 * state can be changed.
1120 * Returns true if the state can be changed, false if not.
1121 */
6a9ee8af
DA
1122static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
1123{
1124 struct drm_device *dev = pci_get_drvdata(pdev);
1125 bool can_switch;
1126
1127 spin_lock(&dev->count_lock);
1128 can_switch = (dev->open_count == 0);
1129 spin_unlock(&dev->count_lock);
1130 return can_switch;
1131}
1132
26ec685f
TI
/* vga_switcheroo callbacks; no reprobe hook is needed for radeon */
static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
	.set_gpu_state = radeon_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = radeon_switcheroo_can_switch,
};
6a9ee8af 1138
0c195119
AD
/**
 * radeon_device_init - initialize the driver
 *
 * @rdev: radeon_device pointer
 * @ddev: drm dev pointer
 * @pdev: pci dev pointer
 * @flags: driver flags
 *
 * Initializes the driver info and hw (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver startup.
 */
int radeon_device_init(struct radeon_device *rdev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r, i;
	int dma_bits;
	bool runtime = false;

	rdev->shutdown = false;
	rdev->dev = &pdev->dev;
	rdev->ddev = ddev;
	rdev->pdev = pdev;
	rdev->flags = flags;
	/* family is packed into the low bits of the driver flags */
	rdev->family = flags & RADEON_FAMILY_MASK;
	rdev->is_atom_bios = false;
	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
	/* default gtt size; may be overridden by radeon_check_arguments() below */
	rdev->mc.gtt_size = 512 * 1024 * 1024;
	rdev->accel_working = false;
	/* set up ring ids */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		rdev->ring[i].idx = i;
	}

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
		radeon_family_name[rdev->family], pdev->vendor, pdev->device,
		pdev->subsystem_vendor, pdev->subsystem_device);

	/* mutex initialization are all done here so we
	 * can recall function without having locking issues */
	mutex_init(&rdev->ring_lock);
	mutex_init(&rdev->dc_hw_i2c_mutex);
	atomic_set(&rdev->ih.lock, 0);
	mutex_init(&rdev->gem.mutex);
	mutex_init(&rdev->pm.mutex);
	mutex_init(&rdev->gpu_clock_mutex);
	mutex_init(&rdev->srbm_mutex);
	init_rwsem(&rdev->pm.mclk_lock);
	init_rwsem(&rdev->exclusive_lock);
	init_waitqueue_head(&rdev->irq.vblank_queue);
	r = radeon_gem_init(rdev);
	if (r)
		return r;
	/* initialize vm here */
	mutex_init(&rdev->vm_manager.lock);
	/* Adjust VM size here.
	 * Currently set to 4GB ((1 << 20) 4k pages).
	 * Max GPUVM size for cayman and SI is 40 bits.
	 */
	rdev->vm_manager.max_pfn = 1 << 20;
	INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);

	/* Set asic functions */
	r = radeon_asic_init(rdev);
	if (r)
		return r;
	/* must run after radeon_asic_init() so rdev->family is usable */
	radeon_check_arguments(rdev);

	/* all of the newer IGP chips have an internal gart
	 * However some rs4xx report as AGP, so remove that here.
	 */
	if ((rdev->family >= CHIP_RS400) &&
	    (rdev->flags & RADEON_IS_IGP)) {
		rdev->flags &= ~RADEON_IS_AGP;
	}

	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
		radeon_agp_disable(rdev);
	}

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	if (rdev->family >= CHIP_CAYMAN)
		rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
	else if (rdev->family >= CHIP_CEDAR)
		rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
	else
		rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits
	 * AGP - generally dma32 is safest
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
	 */
	rdev->need_dma32 = false;
	if (rdev->flags & RADEON_IS_AGP)
		rdev->need_dma32 = true;
	if ((rdev->flags & RADEON_IS_PCI) &&
	    (rdev->family <= CHIP_RS740))
		rdev->need_dma32 = true;

	dma_bits = rdev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		/* fall back to 32 bits if the platform rejects 40 */
		rdev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "radeon: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "radeon: No coherent DMA available.\n");
	}

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
	spin_lock_init(&rdev->mmio_idx_lock);
	spin_lock_init(&rdev->smc_idx_lock);
	spin_lock_init(&rdev->pll_idx_lock);
	spin_lock_init(&rdev->mc_idx_lock);
	spin_lock_init(&rdev->pcie_idx_lock);
	spin_lock_init(&rdev->pciep_idx_lock);
	spin_lock_init(&rdev->pif_idx_lock);
	spin_lock_init(&rdev->cg_idx_lock);
	spin_lock_init(&rdev->uvd_idx_lock);
	spin_lock_init(&rdev->rcu_idx_lock);
	spin_lock_init(&rdev->didt_idx_lock);
	spin_lock_init(&rdev->end_idx_lock);
	/* BONAIRE and newer expose the mmio registers in BAR 5, older
	 * parts in BAR 2 */
	if (rdev->family >= CHIP_BONAIRE) {
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
	} else {
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
	}
	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
	if (rdev->rmmio == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
	DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);

	/* doorbell bar mapping */
	if (rdev->family >= CHIP_BONAIRE)
		radeon_doorbell_init(rdev);

	/* io port mapping: take the first I/O BAR we find */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
			break;
		}
	}
	if (rdev->rio_mem == NULL)
		DRM_ERROR("Unable to find PCI I/O BAR\n");

	/* if we have > 1 VGA cards, then disable the radeon VGA resources */
	/* this will fail for cards that aren't VGA class devices, just
	 * ignore it */
	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);

	/* runtime pm: explicit opt-in (1), or auto-detect on PX systems (-1) */
	if (radeon_runtime_pm == 1)
		runtime = true;
	if ((radeon_runtime_pm == -1) && radeon_is_px())
		runtime = true;
	vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
	if (runtime)
		vga_switcheroo_init_domain_pm_ops(rdev->dev, &rdev->vga_pm_domain);

	r = radeon_init(rdev);
	if (r)
		return r;

	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	r = radeon_gem_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("registering gem debugfs failed (%d).\n", r);
	}

	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
		/* Acceleration not working on AGP card try again
		 * with fallback to PCI or PCIE GART
		 */
		radeon_asic_reset(rdev);
		radeon_fini(rdev);
		radeon_agp_disable(rdev);
		r = radeon_init(rdev);
		if (r)
			return r;
	}

	/* optional self-tests / benchmarks, gated by module params */
	if ((radeon_testing & 1)) {
		if (rdev->accel_working)
			radeon_test_moves(rdev);
		else
			DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
	}
	if ((radeon_testing & 2)) {
		if (rdev->accel_working)
			radeon_test_syncing(rdev);
		else
			DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
	}
	if (radeon_benchmarking) {
		if (rdev->accel_working)
			radeon_benchmark(rdev, radeon_benchmarking);
		else
			DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
	}
	return 0;
}
1359
4d8bf9ae
CK
1360static void radeon_debugfs_remove_files(struct radeon_device *rdev);
1361
0c195119
AD
/**
 * radeon_device_fini - tear down the driver
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the driver info (all asics).
 * Called at driver shutdown.
 */
void radeon_device_fini(struct radeon_device *rdev)
{
	DRM_INFO("radeon: finishing device.\n");
	/* flag shutdown so other paths can bail out early */
	rdev->shutdown = true;
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);
	radeon_fini(rdev);
	/* unwind the registrations made in radeon_device_init() */
	vga_switcheroo_unregister_client(rdev->pdev);
	vga_client_register(rdev->pdev, NULL, NULL, NULL);
	if (rdev->rio_mem)
		pci_iounmap(rdev->pdev, rdev->rio_mem);
	rdev->rio_mem = NULL;
	iounmap(rdev->rmmio);
	rdev->rmmio = NULL;
	/* doorbell bar only exists on BONAIRE and newer */
	if (rdev->family >= CHIP_BONAIRE)
		radeon_doorbell_fini(rdev);
	radeon_debugfs_remove_files(rdev);
}
1388
1389
1390/*
1391 * Suspend & resume.
1392 */
0c195119
AD
/**
 * radeon_suspend_kms - initiate device suspend
 *
 * @dev: drm dev pointer
 * @suspend: true to also power the pci device down (D3hot)
 * @fbcon: true to suspend the fbdev console
 *
 * Puts the hw in the suspend state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */
int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
{
	struct radeon_device *rdev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	int i, r;
	bool force_completion = false;

	if (dev == NULL || dev->dev_private == NULL) {
		return -ENODEV;
	}

	rdev = dev->dev_private;

	/* already powered down via switcheroo; nothing to do */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	drm_kms_helper_poll_disable(dev);

	/* turn off display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
	}

	/* unpin the front buffers */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
		struct radeon_bo *robj;

		if (rfb == NULL || rfb->obj == NULL) {
			continue;
		}
		robj = gem_to_radeon_bo(rfb->obj);
		/* don't unpin kernel fb objects */
		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				radeon_bo_unpin(robj);
				radeon_bo_unreserve(robj);
			}
		}
	}
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);

	mutex_lock(&rdev->ring_lock);
	/* wait for gpu to finish processing current batch */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		r = radeon_fence_wait_empty_locked(rdev, i);
		if (r) {
			/* delay GPU reset to resume */
			force_completion = true;
		}
	}
	if (force_completion) {
		radeon_fence_driver_force_completion(rdev);
	}
	mutex_unlock(&rdev->ring_lock);

	radeon_save_bios_scratch_regs(rdev);

	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);
	/* evict remaining vram memory */
	radeon_bo_evict_vram(rdev);

	radeon_agp_suspend(rdev);

	pci_save_state(dev->pdev);
	if (suspend) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}

	/* console_lock serializes against fbcon drawing while we swap state */
	if (fbcon) {
		console_lock();
		radeon_fbdev_set_suspend(rdev, 1);
		console_unlock();
	}
	return 0;
}
1485
0c195119
AD
/**
 * radeon_resume_kms - initiate device resume
 *
 * @dev: drm dev pointer
 * @resume: true to also re-enable the pci device (from D3)
 * @fbcon: true to resume the fbdev console
 *
 * Bring the hw back to operating state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver resume.
 */
int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
{
	struct drm_connector *connector;
	struct radeon_device *rdev = dev->dev_private;
	int r;

	/* powered down via switcheroo; resume is handled elsewhere */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	/* held across the whole resume so fbcon can't draw on a
	 * half-initialized device; released on every exit path */
	if (fbcon) {
		console_lock();
	}
	if (resume) {
		pci_set_power_state(dev->pdev, PCI_D0);
		pci_restore_state(dev->pdev);
		if (pci_enable_device(dev->pdev)) {
			if (fbcon)
				console_unlock();
			return -1;
		}
	}
	/* resume AGP if in use */
	radeon_agp_resume(rdev);
	radeon_resume(rdev);

	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		/* do dpm late init */
		r = radeon_pm_late_init(rdev);
		if (r) {
			rdev->pm.dpm_enabled = false;
			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
		}
	} else {
		/* resume old pm late */
		radeon_pm_resume(rdev);
	}

	radeon_restore_bios_scratch_regs(rdev);

	if (fbcon) {
		radeon_fbdev_set_suspend(rdev, 0);
		console_unlock();
	}

	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);
		/* turn on the BL */
		if (rdev->mode_info.bl_encoder) {
			u8 bl_level = radeon_get_backlight_level(rdev,
								 rdev->mode_info.bl_encoder);
			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
						   bl_level);
		}
	}
	/* reset hpd state */
	radeon_hpd_init(rdev);
	/* blat the mode back in */
	drm_helper_resume_force_mode(dev);
	/* turn on display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
	}

	drm_kms_helper_poll_enable(dev);
	return 0;
}
1567
0c195119
AD
/**
 * radeon_gpu_reset - reset the asic
 *
 * @rdev: radeon device pointer
 *
 * Attempt the reset the GPU if it has hung (all asics).
 * Returns 0 for success or an error on failure.
 */
int radeon_gpu_reset(struct radeon_device *rdev)
{
	/* saved ring contents, restored after a successful reset */
	unsigned ring_sizes[RADEON_NUM_RINGS];
	uint32_t *ring_data[RADEON_NUM_RINGS];

	bool saved = false;

	int i, r;
	int resched;

	/* exclude all other users of the device while we reset */
	down_write(&rdev->exclusive_lock);

	/* another thread may have already performed the reset */
	if (!rdev->needs_reset) {
		up_write(&rdev->exclusive_lock);
		return 0;
	}

	rdev->needs_reset = false;

	radeon_save_bios_scratch_regs(rdev);
	/* block TTM */
	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
	radeon_pm_suspend(rdev);
	radeon_suspend(rdev);

	/* snapshot pending commands so they can be replayed after reset */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
						   &ring_data[i]);
		if (ring_sizes[i]) {
			saved = true;
			dev_info(rdev->dev, "Saved %d dwords of commands "
				 "on ring %d.\n", ring_sizes[i], i);
		}
	}

retry:
	r = radeon_asic_reset(rdev);
	if (!r) {
		dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
		radeon_resume(rdev);
	}

	radeon_restore_bios_scratch_regs(rdev);

	if (!r) {
		/* replay the saved commands; radeon_ring_restore consumes
		 * (frees) ring_data, hence the NULL-out below */
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			radeon_ring_restore(rdev, &rdev->ring[i],
					    ring_sizes[i], ring_data[i]);
			ring_sizes[i] = 0;
			ring_data[i] = NULL;
		}

		r = radeon_ib_ring_tests(rdev);
		if (r) {
			dev_err(rdev->dev, "ib ring test failed (%d).\n", r);
			if (saved) {
				/* retry once more without replaying the
				 * saved (possibly poisoned) commands */
				saved = false;
				radeon_suspend(rdev);
				goto retry;
			}
		}
	} else {
		/* reset failed: unblock waiters and drop the saved commands */
		radeon_fence_driver_force_completion(rdev);
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			kfree(ring_data[i]);
		}
	}

	radeon_pm_resume(rdev);
	drm_helper_resume_force_mode(rdev->ddev);

	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
	if (r) {
		/* bad news, how to tell it to userspace ? */
		dev_info(rdev->dev, "GPU reset failed\n");
	}

	up_write(&rdev->exclusive_lock);
	return r;
}
1656
771fe6b9
JG
1657
1658/*
1659 * Debugfs
1660 */
771fe6b9
JG
1661int radeon_debugfs_add_files(struct radeon_device *rdev,
1662 struct drm_info_list *files,
1663 unsigned nfiles)
1664{
1665 unsigned i;
1666
4d8bf9ae
CK
1667 for (i = 0; i < rdev->debugfs_count; i++) {
1668 if (rdev->debugfs[i].files == files) {
771fe6b9
JG
1669 /* Already registered */
1670 return 0;
1671 }
1672 }
c245cb9e 1673
4d8bf9ae 1674 i = rdev->debugfs_count + 1;
c245cb9e
MW
1675 if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
1676 DRM_ERROR("Reached maximum number of debugfs components.\n");
1677 DRM_ERROR("Report so we increase "
1678 "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
771fe6b9
JG
1679 return -EINVAL;
1680 }
4d8bf9ae
CK
1681 rdev->debugfs[rdev->debugfs_count].files = files;
1682 rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
1683 rdev->debugfs_count = i;
771fe6b9
JG
1684#if defined(CONFIG_DEBUG_FS)
1685 drm_debugfs_create_files(files, nfiles,
1686 rdev->ddev->control->debugfs_root,
1687 rdev->ddev->control);
1688 drm_debugfs_create_files(files, nfiles,
1689 rdev->ddev->primary->debugfs_root,
1690 rdev->ddev->primary);
1691#endif
1692 return 0;
1693}
1694
4d8bf9ae
CK
/* unregister every debugfs set recorded by radeon_debugfs_add_files() */
static void radeon_debugfs_remove_files(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned idx;

	for (idx = 0; idx < rdev->debugfs_count; idx++) {
		/* each set was registered on both minors; remove from both */
		drm_debugfs_remove_files(rdev->debugfs[idx].files,
					 rdev->debugfs[idx].num_files,
					 rdev->ddev->control);
		drm_debugfs_remove_files(rdev->debugfs[idx].files,
					 rdev->debugfs[idx].num_files,
					 rdev->ddev->primary);
	}
#endif
}
1710
771fe6b9
JG
#if defined(CONFIG_DEBUG_FS)
/* debugfs minor hook; entries are added via radeon_debugfs_add_files(),
 * so there is nothing to do here */
int radeon_debugfs_init(struct drm_minor *minor)
{
	return 0;
}

/* debugfs minor hook; per-device removal happens in
 * radeon_debugfs_remove_files(), so this is intentionally empty */
void radeon_debugfs_cleanup(struct drm_minor *minor)
{
}
#endif
This page took 0.418427 seconds and 5 git commands to generate.