drm/radeon: convert to pmops
[deliverable/linux.git] / drivers / gpu / drm / radeon / radeon_device.c
CommitLineData
771fe6b9
JG
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include <linux/console.h>
5a0e3ad6 29#include <linux/slab.h>
771fe6b9
JG
30#include <drm/drmP.h>
31#include <drm/drm_crtc_helper.h>
32#include <drm/radeon_drm.h>
28d52043 33#include <linux/vgaarb.h>
6a9ee8af 34#include <linux/vga_switcheroo.h>
bcc65fd8 35#include <linux/efi.h>
771fe6b9
JG
36#include "radeon_reg.h"
37#include "radeon.h"
771fe6b9
JG
38#include "atom.h"
39
/* Human-readable ASIC family names, indexed by the radeon_family enum
 * (see radeon_family.h); "LAST" terminates the table. Entry width is
 * fixed at 16 bytes so the table stays a simple 2-D char array. */
static const char radeon_family_name[][16] = {
	"R100",
	"RV100",
	"RS100",
	"RV200",
	"RS200",
	"R200",
	"RV250",
	"RS300",
	"RV280",
	"R300",
	"R350",
	"RV350",
	"RV380",
	"R420",
	"R423",
	"RV410",
	"RS400",
	"RS480",
	"RS600",
	"RS690",
	"RS740",
	"RV515",
	"R520",
	"RV530",
	"RV560",
	"RV570",
	"R580",
	"R600",
	"RV610",
	"RV630",
	"RV670",
	"RV620",
	"RV635",
	"RS780",
	"RS880",
	"RV770",
	"RV730",
	"RV710",
	"RV740",
	"CEDAR",
	"REDWOOD",
	"JUNIPER",
	"CYPRESS",
	"HEMLOCK",
	"PALM",
	"SUMO",
	"SUMO2",
	"BARTS",
	"TURKS",
	"CAICOS",
	"CAYMAN",
	"ARUBA",
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"LAST",
};
103
2e1b65f9
AD
104/**
105 * radeon_program_register_sequence - program an array of registers.
106 *
107 * @rdev: radeon_device pointer
108 * @registers: pointer to the register array
109 * @array_size: size of the register array
110 *
111 * Programs an array or registers with and and or masks.
112 * This is a helper for setting golden registers.
113 */
114void radeon_program_register_sequence(struct radeon_device *rdev,
115 const u32 *registers,
116 const u32 array_size)
117{
118 u32 tmp, reg, and_mask, or_mask;
119 int i;
120
121 if (array_size % 3)
122 return;
123
124 for (i = 0; i < array_size; i +=3) {
125 reg = registers[i + 0];
126 and_mask = registers[i + 1];
127 or_mask = registers[i + 2];
128
129 if (and_mask == 0xffffffff) {
130 tmp = or_mask;
131 } else {
132 tmp = RREG32(reg);
133 tmp &= ~and_mask;
134 tmp |= or_mask;
135 }
136 WREG32(reg, tmp);
137 }
138}
139
/**
 * radeon_surface_init - Clear GPU surface registers.
 *
 * @rdev: radeon_device pointer
 *
 * Clear GPU surface registers (r1xx-r5xx).  R600 and newer have no
 * surface registers, so this is a no-op for them.
 */
void radeon_surface_init(struct radeon_device *rdev)
{
	/* FIXME: check this out */
	if (rdev->family < CHIP_R600) {
		int i;

		for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
			/* re-claim registers backing a live bo, reset the rest */
			if (rdev->surface_regs[i].bo)
				radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
			else
				radeon_clear_surface_reg(rdev, i);
		}
		/* enable surfaces */
		WREG32(RADEON_SURFACE_CNTL, 0);
	}
}
163
771fe6b9
JG
164/*
165 * GPU scratch registers helpers function.
166 */
0c195119
AD
167/**
168 * radeon_scratch_init - Init scratch register driver information.
169 *
170 * @rdev: radeon_device pointer
171 *
172 * Init CP scratch register driver information (r1xx-r5xx)
173 */
3ce0a23d 174void radeon_scratch_init(struct radeon_device *rdev)
771fe6b9
JG
175{
176 int i;
177
178 /* FIXME: check this out */
179 if (rdev->family < CHIP_R300) {
180 rdev->scratch.num_reg = 5;
181 } else {
182 rdev->scratch.num_reg = 7;
183 }
724c80e1 184 rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
771fe6b9
JG
185 for (i = 0; i < rdev->scratch.num_reg; i++) {
186 rdev->scratch.free[i] = true;
724c80e1 187 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
771fe6b9
JG
188 }
189}
190
0c195119
AD
191/**
192 * radeon_scratch_get - Allocate a scratch register
193 *
194 * @rdev: radeon_device pointer
195 * @reg: scratch register mmio offset
196 *
197 * Allocate a CP scratch register for use by the driver (all asics).
198 * Returns 0 on success or -EINVAL on failure.
199 */
771fe6b9
JG
200int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
201{
202 int i;
203
204 for (i = 0; i < rdev->scratch.num_reg; i++) {
205 if (rdev->scratch.free[i]) {
206 rdev->scratch.free[i] = false;
207 *reg = rdev->scratch.reg[i];
208 return 0;
209 }
210 }
211 return -EINVAL;
212}
213
0c195119
AD
214/**
215 * radeon_scratch_free - Free a scratch register
216 *
217 * @rdev: radeon_device pointer
218 * @reg: scratch register mmio offset
219 *
220 * Free a CP scratch register allocated for use by the driver (all asics)
221 */
771fe6b9
JG
222void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
223{
224 int i;
225
226 for (i = 0; i < rdev->scratch.num_reg; i++) {
227 if (rdev->scratch.reg[i] == reg) {
228 rdev->scratch.free[i] = true;
229 return;
230 }
231 }
232}
233
/*
 * GPU doorbell aperture helpers function.
 */
/**
 * radeon_doorbell_init - Init doorbell driver information.
 *
 * @rdev: radeon_device pointer
 *
 * Init doorbell driver information (CIK)
 * Returns 0 on success, error on failure.
 */
int radeon_doorbell_init(struct radeon_device *rdev)
{
	int i;

	/* doorbell bar mapping: the doorbell aperture is PCI BAR 2 */
	rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
	rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);

	/* limit to 4 MB for now */
	if (rdev->doorbell.size > (4 * 1024 * 1024))
		rdev->doorbell.size = 4 * 1024 * 1024;

	rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.size);
	if (rdev->doorbell.ptr == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
	DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);

	/* doorbells are handed out in page-sized units */
	rdev->doorbell.num_pages = rdev->doorbell.size / PAGE_SIZE;

	for (i = 0; i < rdev->doorbell.num_pages; i++) {
		rdev->doorbell.free[i] = true;
	}
	return 0;
}
271
/**
 * radeon_doorbell_fini - Tear down doorbell driver information.
 *
 * @rdev: radeon_device pointer
 *
 * Tear down doorbell driver information (CIK).  Unmaps the doorbell
 * aperture mapped by radeon_doorbell_init().
 */
void radeon_doorbell_fini(struct radeon_device *rdev)
{
	iounmap(rdev->doorbell.ptr);
	/* NULL the pointer so a stale mapping can't be reused */
	rdev->doorbell.ptr = NULL;
}
284
285/**
286 * radeon_doorbell_get - Allocate a doorbell page
287 *
288 * @rdev: radeon_device pointer
289 * @doorbell: doorbell page number
290 *
291 * Allocate a doorbell page for use by the driver (all asics).
292 * Returns 0 on success or -EINVAL on failure.
293 */
294int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
295{
296 int i;
297
298 for (i = 0; i < rdev->doorbell.num_pages; i++) {
299 if (rdev->doorbell.free[i]) {
300 rdev->doorbell.free[i] = false;
301 *doorbell = i;
302 return 0;
303 }
304 }
305 return -EINVAL;
306}
307
308/**
309 * radeon_doorbell_free - Free a doorbell page
310 *
311 * @rdev: radeon_device pointer
312 * @doorbell: doorbell page number
313 *
314 * Free a doorbell page allocated for use by the driver (all asics)
315 */
316void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
317{
318 if (doorbell < rdev->doorbell.num_pages)
319 rdev->doorbell.free[doorbell] = true;
320}
321
/*
 * radeon_wb_*()
 * Writeback is the method by which the GPU updates special pages
 * in memory with the status of certain GPU events (fences, ring pointers,
 * etc.).
 */

/**
 * radeon_wb_disable - Disable Writeback
 *
 * @rdev: radeon_device pointer
 *
 * Disables Writeback (all asics). Used for suspend.
 */
void radeon_wb_disable(struct radeon_device *rdev)
{
	rdev->wb.enabled = false;
}
340
/**
 * radeon_wb_fini - Disable Writeback and free memory
 *
 * @rdev: radeon_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
void radeon_wb_fini(struct radeon_device *rdev)
{
	radeon_wb_disable(rdev);
	if (rdev->wb.wb_obj) {
		/* bo must be reserved before unmap/unpin; if the reserve
		 * fails the bo is still dropped via unref below */
		if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
			radeon_bo_kunmap(rdev->wb.wb_obj);
			radeon_bo_unpin(rdev->wb.wb_obj);
			radeon_bo_unreserve(rdev->wb.wb_obj);
		}
		radeon_bo_unref(&rdev->wb.wb_obj);
		rdev->wb.wb = NULL;
		rdev->wb.wb_obj = NULL;
	}
}
363
/**
 * radeon_wb_init- Init Writeback driver info and allocate memory
 *
 * @rdev: radeon_device pointer
 *
 * Allocates and maps the Writeback memory and decides, per asic/flags,
 * whether writeback and event-write fences are used (all asics).
 * Used at driver startup.
 * Returns 0 on success or an -error on failure.
 */
int radeon_wb_init(struct radeon_device *rdev)
{
	int r;

	/* allocate once; on resume the existing bo is reused */
	if (rdev->wb.wb_obj == NULL) {
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, NULL, &rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0)) {
			radeon_wb_fini(rdev);
			return r;
		}
		/* pin in GTT so the GPU address stays valid */
		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
				  &rdev->wb.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->wb.wb_obj);
			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
			radeon_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
		radeon_bo_unreserve(rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
			radeon_wb_fini(rdev);
			return r;
		}
	}

	/* clear wb memory */
	memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
	/* disable event_write fences */
	rdev->wb.use_event = false;
	/* disabled via module param */
	if (radeon_no_wb == 1) {
		rdev->wb.enabled = false;
	} else {
		if (rdev->flags & RADEON_IS_AGP) {
			/* often unreliable on AGP */
			rdev->wb.enabled = false;
		} else if (rdev->family < CHIP_R300) {
			/* often unreliable on pre-r300 */
			rdev->wb.enabled = false;
		} else {
			rdev->wb.enabled = true;
			/* event_write fences are only available on r600+ */
			if (rdev->family >= CHIP_R600) {
				rdev->wb.use_event = true;
			}
		}
	}
	/* always use writeback/events on NI, APUs */
	if (rdev->family >= CHIP_PALM) {
		rdev->wb.enabled = true;
		rdev->wb.use_event = true;
	}

	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");

	return 0;
}
438
/**
 * radeon_vram_location - try to find VRAM location
 * @rdev: radeon device structure holding all necessary informations
 * @mc: memory controller structure holding memory informations
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at base address provided
 * as parameter (which is so far either PCI aperture address or
 * for IGP TOM base address).
 *
 * If there is not enough space to fit the unvisible VRAM in the 32bits
 * address space then we limit the VRAM size to the aperture.
 *
 * If we are using AGP and if the AGP aperture doesn't allow us to have
 * room for all the VRAM than we restrict the VRAM to the PCI aperture
 * size and print a warning.
 *
 * This function will never fail, worst case is limiting VRAM.
 *
 * Note: GTT start, end, size should be initialized before calling this
 * function on AGP platform.
 *
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size as on some board we need to program the mc to
 * cover the whole aperture even if VRAM size is inferior to aperture size
 * Novell bug 204882 + along with lots of ubuntu ones
 *
 * Note: when limiting vram it's safe to overwrite real_vram_size because
 * we are not in the case where real_vram_size is inferior to mc_vram_size
 * (ie not affected by bogus hw of Novell bug 204882 + along with lots of
 * ubuntu ones)
 *
 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
	/* user-requested VRAM cap, module param in MB -> bytes */
	uint64_t limit = (uint64_t)radeon_vram_limit << 20;

	mc->vram_start = base;
	if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	/* on AGP, VRAM must not overlap the GTT range */
	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
		 mc->mc_vram_size >> 20, mc->vram_start,
		 mc->vram_end, mc->real_vram_size >> 20);
}
771fe6b9 503
/**
 * radeon_gtt_location - try to find GTT location
 * @rdev: radeon device structure holding all necessary informations
 * @mc: memory controller structure holding memory informations
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If GTT size is bigger than space left then we adjust GTT size.
 * Thus this function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_af, size_bf;

	/* space after VRAM end / before VRAM start, rounded to the
	 * GTT base alignment */
	size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		if (mc->gtt_size > size_bf) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
	} else {
		if (mc->gtt_size > size_af) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
		 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}
539
/*
 * GPU helpers function.
 */
/**
 * radeon_card_posted - check if the hw has already been initialized
 *
 * @rdev: radeon_device pointer
 *
 * Check if the asic has been initialized (all asics).
 * Used at driver startup.
 * Returns true if initialized or false if not.
 */
bool radeon_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* required for EFI mode on macbook2,1 which uses an r5xx asic */
	if (efi_enabled(EFI_BOOT) &&
	    (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
	    (rdev->family < CHIP_R600))
		return false;

	/* asics without display hardware have no CRTCs to check */
	if (ASIC_IS_NODCE(rdev))
		goto check_memsize;

	/* first check CRTCs */
	if (ASIC_IS_DCE4(rdev)) {
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
		if (rdev->num_crtc >= 4) {
			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
				RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
		}
		if (rdev->num_crtc >= 6) {
			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
				RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
		}
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_AVIVO(rdev)) {
		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
		      RREG32(AVIVO_D2CRTC_CONTROL);
		if (reg & AVIVO_CRTC_EN) {
			return true;
		}
	} else {
		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
		      RREG32(RADEON_CRTC2_GEN_CNTL);
		if (reg & RADEON_CRTC_EN) {
			return true;
		}
	}

check_memsize:
	/* then check MEM_SIZE, in case the crtcs are off */
	if (rdev->family >= CHIP_R600)
		reg = RREG32(R600_CONFIG_MEMSIZE);
	else
		reg = RREG32(RADEON_CONFIG_MEMSIZE);

	/* a non-zero memsize means the vbios has programmed the mc */
	if (reg)
		return true;

	return false;

}
606
/**
 * radeon_update_bandwidth_info - update display bandwidth params
 *
 * @rdev: radeon_device pointer
 *
 * Used when sclk/mclk are switched or display modes are set.
 * params are used to calculate display watermarks (all asics)
 */
void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
	fixed20_12 a;
	u32 sclk = rdev->pm.current_sclk;
	u32 mclk = rdev->pm.current_mclk;

	/* sclk/mclk in Mhz: raw clocks are in 10 kHz units, so divide
	 * by 100 in fixed point */
	a.full = dfixed_const(100);
	rdev->pm.sclk.full = dfixed_const(sclk);
	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
	rdev->pm.mclk.full = dfixed_const(mclk);
	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);

	if (rdev->flags & RADEON_IS_IGP) {
		a.full = dfixed_const(16);
		/* core_bandwidth = sclk(Mhz) * 16 */
		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
	}
}
634
0c195119
AD
635/**
636 * radeon_boot_test_post_card - check and possibly initialize the hw
637 *
638 * @rdev: radeon_device pointer
639 *
640 * Check if the asic is initialized and if not, attempt to initialize
641 * it (all asics).
642 * Returns true if initialized or false if not.
643 */
72542d77
DA
644bool radeon_boot_test_post_card(struct radeon_device *rdev)
645{
646 if (radeon_card_posted(rdev))
647 return true;
648
649 if (rdev->bios) {
650 DRM_INFO("GPU not posted. posting now...\n");
651 if (rdev->is_atom_bios)
652 atom_asic_init(rdev->mode_info.atom_context);
653 else
654 radeon_combios_asic_init(rdev->ddev);
655 return true;
656 } else {
657 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
658 return false;
659 }
660}
661
/**
 * radeon_dummy_page_init - init dummy page used by the driver
 *
 * @rdev: radeon_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for gart entries
 * when pages are taken out of the GART
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_dummy_page_init(struct radeon_device *rdev)
{
	/* already allocated (e.g. on resume) */
	if (rdev->dummy_page.page)
		return 0;
	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (rdev->dummy_page.page == NULL)
		return -ENOMEM;
	rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
					     0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	/* pci_map_page() can fail; must check before using the dma addr */
	if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
		dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(rdev->dummy_page.page);
		rdev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	return 0;
}
689
0c195119
AD
690/**
691 * radeon_dummy_page_fini - free dummy page used by the driver
692 *
693 * @rdev: radeon_device pointer
694 *
695 * Frees the dummy page used by the driver (all asics).
696 */
3ce0a23d
JG
697void radeon_dummy_page_fini(struct radeon_device *rdev)
698{
699 if (rdev->dummy_page.page == NULL)
700 return;
701 pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
702 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
703 __free_page(rdev->dummy_page.page);
704 rdev->dummy_page.page = NULL;
705}
706
771fe6b9 707
771fe6b9 708/* ATOM accessor methods */
0c195119
AD
709/*
710 * ATOM is an interpreted byte code stored in tables in the vbios. The
711 * driver registers callbacks to access registers and the interpreter
712 * in the driver parses the tables and executes then to program specific
713 * actions (set display modes, asic init, etc.). See radeon_atombios.c,
714 * atombios.h, and atom.c
715 */
716
717/**
718 * cail_pll_read - read PLL register
719 *
720 * @info: atom card_info pointer
721 * @reg: PLL register offset
722 *
723 * Provides a PLL register accessor for the atom interpreter (r4xx+).
724 * Returns the value of the PLL register.
725 */
771fe6b9
JG
726static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
727{
728 struct radeon_device *rdev = info->dev->dev_private;
729 uint32_t r;
730
731 r = rdev->pll_rreg(rdev, reg);
732 return r;
733}
734
0c195119
AD
735/**
736 * cail_pll_write - write PLL register
737 *
738 * @info: atom card_info pointer
739 * @reg: PLL register offset
740 * @val: value to write to the pll register
741 *
742 * Provides a PLL register accessor for the atom interpreter (r4xx+).
743 */
771fe6b9
JG
744static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
745{
746 struct radeon_device *rdev = info->dev->dev_private;
747
748 rdev->pll_wreg(rdev, reg, val);
749}
750
0c195119
AD
751/**
752 * cail_mc_read - read MC (Memory Controller) register
753 *
754 * @info: atom card_info pointer
755 * @reg: MC register offset
756 *
757 * Provides an MC register accessor for the atom interpreter (r4xx+).
758 * Returns the value of the MC register.
759 */
771fe6b9
JG
760static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
761{
762 struct radeon_device *rdev = info->dev->dev_private;
763 uint32_t r;
764
765 r = rdev->mc_rreg(rdev, reg);
766 return r;
767}
768
0c195119
AD
769/**
770 * cail_mc_write - write MC (Memory Controller) register
771 *
772 * @info: atom card_info pointer
773 * @reg: MC register offset
774 * @val: value to write to the pll register
775 *
776 * Provides a MC register accessor for the atom interpreter (r4xx+).
777 */
771fe6b9
JG
778static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
779{
780 struct radeon_device *rdev = info->dev->dev_private;
781
782 rdev->mc_wreg(rdev, reg, val);
783}
784
0c195119
AD
785/**
786 * cail_reg_write - write MMIO register
787 *
788 * @info: atom card_info pointer
789 * @reg: MMIO register offset
790 * @val: value to write to the pll register
791 *
792 * Provides a MMIO register accessor for the atom interpreter (r4xx+).
793 */
771fe6b9
JG
794static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
795{
796 struct radeon_device *rdev = info->dev->dev_private;
797
798 WREG32(reg*4, val);
799}
800
0c195119
AD
801/**
802 * cail_reg_read - read MMIO register
803 *
804 * @info: atom card_info pointer
805 * @reg: MMIO register offset
806 *
807 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
808 * Returns the value of the MMIO register.
809 */
771fe6b9
JG
810static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
811{
812 struct radeon_device *rdev = info->dev->dev_private;
813 uint32_t r;
814
815 r = RREG32(reg*4);
816 return r;
817}
818
0c195119
AD
819/**
820 * cail_ioreg_write - write IO register
821 *
822 * @info: atom card_info pointer
823 * @reg: IO register offset
824 * @val: value to write to the pll register
825 *
826 * Provides a IO register accessor for the atom interpreter (r4xx+).
827 */
351a52a2
AD
828static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
829{
830 struct radeon_device *rdev = info->dev->dev_private;
831
832 WREG32_IO(reg*4, val);
833}
834
0c195119
AD
835/**
836 * cail_ioreg_read - read IO register
837 *
838 * @info: atom card_info pointer
839 * @reg: IO register offset
840 *
841 * Provides an IO register accessor for the atom interpreter (r4xx+).
842 * Returns the value of the IO register.
843 */
351a52a2
AD
844static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
845{
846 struct radeon_device *rdev = info->dev->dev_private;
847 uint32_t r;
848
849 r = RREG32_IO(reg*4);
850 return r;
851}
852
/**
 * radeon_atombios_init - init the driver info and callbacks for atombios
 *
 * @rdev: radeon_device pointer
 *
 * Initializes the driver info and register access callbacks for the
 * ATOM interpreter (r4xx+).
 * Returns 0 on success, -ENOMEM on failure.
 * Called at driver startup.
 */
int radeon_atombios_init(struct radeon_device *rdev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	rdev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = rdev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops; fall back to MMIO when the PCI I/O BAR
	 * could not be mapped */
	if (rdev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
	if (!rdev->mode_info.atom_context) {
		/* frees atom_card_info too (see radeon_atombios_fini) */
		radeon_atombios_fini(rdev);
		return -ENOMEM;
	}

	mutex_init(&rdev->mode_info.atom_context->mutex);
	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
	return 0;
}
900
0c195119
AD
901/**
902 * radeon_atombios_fini - free the driver info and callbacks for atombios
903 *
904 * @rdev: radeon_device pointer
905 *
906 * Frees the driver info and register access callbacks for the ATOM
907 * interpreter (r4xx+).
908 * Called at driver shutdown.
909 */
771fe6b9
JG
910void radeon_atombios_fini(struct radeon_device *rdev)
911{
4a04a844
JG
912 if (rdev->mode_info.atom_context) {
913 kfree(rdev->mode_info.atom_context->scratch);
4a04a844 914 }
0e34d094
TG
915 kfree(rdev->mode_info.atom_context);
916 rdev->mode_info.atom_context = NULL;
61c4b24b 917 kfree(rdev->mode_info.atom_card_info);
0e34d094 918 rdev->mode_info.atom_card_info = NULL;
771fe6b9
JG
919}
920
/* COMBIOS */
/*
 * COMBIOS is the bios format prior to ATOM. It provides
 * command tables similar to ATOM, but doesn't have a unified
 * parser. See radeon_combios.c
 */

/**
 * radeon_combios_init - init the driver info for combios
 *
 * @rdev: radeon_device pointer
 *
 * Initializes the driver info for combios (r1xx-r3xx).
 * Returns 0 on success.
 * Called at driver startup.
 */
int radeon_combios_init(struct radeon_device *rdev)
{
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
	return 0;
}
942
/**
 * radeon_combios_fini - free the driver info for combios
 *
 * @rdev: radeon_device pointer
 *
 * Frees the driver info for combios (r1xx-r3xx).
 * Called at driver shutdown.  Intentionally empty: combios init
 * allocates nothing that needs freeing.
 */
void radeon_combios_fini(struct radeon_device *rdev)
{
}
954
0c195119
AD
955/* if we get transitioned to only one device, take VGA back */
956/**
957 * radeon_vga_set_decode - enable/disable vga decode
958 *
959 * @cookie: radeon_device pointer
960 * @state: enable/disable vga decode
961 *
962 * Enable/disable vga decode (all asics).
963 * Returns VGA resource flags.
964 */
28d52043
DA
965static unsigned int radeon_vga_set_decode(void *cookie, bool state)
966{
967 struct radeon_device *rdev = cookie;
28d52043
DA
968 radeon_vga_set_state(rdev, state);
969 if (state)
970 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
971 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
972 else
973 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
974}
c1176d6f 975
/**
 * radeon_check_pot_argument - check that argument is a power of two
 *
 * @arg: value to check
 *
 * Validates that a certain argument is a power of two (all asics).
 * Note: 0 is also accepted, since callers use 0 to mean "disabled".
 * Returns true if argument is valid.
 */
static bool radeon_check_pot_argument(int arg)
{
	/* a power of two has no bit in common with its predecessor */
	return !(arg & (arg - 1));
}
988
0c195119
AD
989/**
990 * radeon_check_arguments - validate module params
991 *
992 * @rdev: radeon_device pointer
993 *
994 * Validates certain module parameters and updates
995 * the associated values used by the driver (all asics).
996 */
1109ca09 997static void radeon_check_arguments(struct radeon_device *rdev)
36421338
JG
998{
999 /* vramlimit must be a power of two */
1bcb04f7 1000 if (!radeon_check_pot_argument(radeon_vram_limit)) {
36421338
JG
1001 dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
1002 radeon_vram_limit);
1003 radeon_vram_limit = 0;
36421338 1004 }
1bcb04f7 1005
edcd26e8
AD
1006 if (radeon_gart_size == -1) {
1007 /* default to a larger gart size on newer asics */
1008 if (rdev->family >= CHIP_RV770)
1009 radeon_gart_size = 1024;
1010 else
1011 radeon_gart_size = 512;
1012 }
36421338 1013 /* gtt size must be power of two and greater or equal to 32M */
1bcb04f7 1014 if (radeon_gart_size < 32) {
edcd26e8 1015 dev_warn(rdev->dev, "gart size (%d) too small\n",
36421338 1016 radeon_gart_size);
edcd26e8
AD
1017 if (rdev->family >= CHIP_RV770)
1018 radeon_gart_size = 1024;
1019 else
1020 radeon_gart_size = 512;
1bcb04f7 1021 } else if (!radeon_check_pot_argument(radeon_gart_size)) {
36421338
JG
1022 dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
1023 radeon_gart_size);
edcd26e8
AD
1024 if (rdev->family >= CHIP_RV770)
1025 radeon_gart_size = 1024;
1026 else
1027 radeon_gart_size = 512;
36421338 1028 }
1bcb04f7
CK
1029 rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
1030
36421338
JG
1031 /* AGP mode can only be -1, 1, 2, 4, 8 */
1032 switch (radeon_agpmode) {
1033 case -1:
1034 case 0:
1035 case 1:
1036 case 2:
1037 case 4:
1038 case 8:
1039 break;
1040 default:
1041 dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
1042 "-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
1043 radeon_agpmode = 0;
1044 break;
1045 }
1046}
1047
d1f9809e
ML
1048/**
1049 * radeon_switcheroo_quirk_long_wakeup - return true if longer d3 delay is
1050 * needed for waking up.
1051 *
1052 * @pdev: pci dev pointer
1053 */
1054static bool radeon_switcheroo_quirk_long_wakeup(struct pci_dev *pdev)
1055{
1056
1057 /* 6600m in a macbook pro */
1058 if (pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
1059 pdev->subsystem_device == 0x00e2) {
1060 printk(KERN_INFO "radeon: quirking longer d3 wakeup delay\n");
1061 return true;
1062 }
1063
1064 return false;
1065}
1066
/**
 * radeon_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes the
 * asics before or after it is powered up using ACPI methods.
 */
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	if (state == VGA_SWITCHEROO_ON) {
		/* remember the original D3 delay so it can be restored
		 * after the quirked resume below */
		unsigned d3_delay = dev->pdev->d3_delay;

		printk(KERN_INFO "radeon: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		/* some boards need a longer delay in D3 before wake-up */
		if (d3_delay < 20 && radeon_switcheroo_quirk_long_wakeup(pdev))
			dev->pdev->d3_delay = 20;

		radeon_resume_kms(dev, 1);

		/* restore the caller-visible delay */
		dev->pdev->d3_delay = d3_delay;

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		printk(KERN_INFO "radeon: switched off\n");
		/* stop output polling before powering the hw down */
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		radeon_suspend_kms(dev, 1);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}
1103
/**
 * radeon_switcheroo_can_switch - see if switcheroo state can change
 *
 * @pdev: pci dev pointer
 *
 * Callback for the switcheroo driver. Check of the switcheroo
 * state can be changed.
 * Returns true if the state can be changed, false if not.
 */
static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	bool can_switch;

	/* a GPU switch is only safe while no client holds the device open */
	spin_lock(&dev->count_lock);
	can_switch = (dev->open_count == 0);
	spin_unlock(&dev->count_lock);
	return can_switch;
}
1123
/* vga_switcheroo client callbacks; radeon needs no reprobe hook */
static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
	.set_gpu_state = radeon_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = radeon_switcheroo_can_switch,
};
6a9ee8af 1129
/**
 * radeon_device_init - initialize the driver
 *
 * @rdev: radeon_device pointer
 * @ddev: drm dev pointer
 * @pdev: pci dev pointer
 * @flags: driver flags
 *
 * Initializes the driver info and hw (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver startup.
 *
 * NOTE(review): early-return error paths below do not unwind prior
 * setup (ioremap, vga client registration, gem init) — presumably the
 * caller tears down via radeon_device_fini(); confirm against callers.
 */
int radeon_device_init(struct radeon_device *rdev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r, i;
	int dma_bits;

	/* basic bookkeeping: link the device structs and cache the flags */
	rdev->shutdown = false;
	rdev->dev = &pdev->dev;
	rdev->ddev = ddev;
	rdev->pdev = pdev;
	rdev->flags = flags;
	rdev->family = flags & RADEON_FAMILY_MASK;
	rdev->is_atom_bios = false;
	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
	/* placeholder gtt size; radeon_check_arguments() sets the real one */
	rdev->mc.gtt_size = 512 * 1024 * 1024;
	rdev->accel_working = false;
	/* set up ring ids */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		rdev->ring[i].idx = i;
	}

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
		radeon_family_name[rdev->family], pdev->vendor, pdev->device,
		pdev->subsystem_vendor, pdev->subsystem_device);

	/* mutex initialization are all done here so we
	 * can recall function without having locking issues */
	mutex_init(&rdev->ring_lock);
	mutex_init(&rdev->dc_hw_i2c_mutex);
	atomic_set(&rdev->ih.lock, 0);
	mutex_init(&rdev->gem.mutex);
	mutex_init(&rdev->pm.mutex);
	mutex_init(&rdev->gpu_clock_mutex);
	mutex_init(&rdev->srbm_mutex);
	init_rwsem(&rdev->pm.mclk_lock);
	init_rwsem(&rdev->exclusive_lock);
	init_waitqueue_head(&rdev->irq.vblank_queue);
	r = radeon_gem_init(rdev);
	if (r)
		return r;
	/* initialize vm here */
	mutex_init(&rdev->vm_manager.lock);
	/* Adjust VM size here.
	 * Currently set to 4GB ((1 << 20) 4k pages).
	 * Max GPUVM size for cayman and SI is 40 bits.
	 */
	rdev->vm_manager.max_pfn = 1 << 20;
	INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);

	/* Set asic functions */
	r = radeon_asic_init(rdev);
	if (r)
		return r;
	/* validate module params after the family is known */
	radeon_check_arguments(rdev);

	/* all of the newer IGP chips have an internal gart
	 * However some rs4xx report as AGP, so remove that here.
	 */
	if ((rdev->family >= CHIP_RS400) &&
	    (rdev->flags & RADEON_IS_IGP)) {
		rdev->flags &= ~RADEON_IS_AGP;
	}

	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
		radeon_agp_disable(rdev);
	}

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	if (rdev->family >= CHIP_CAYMAN)
		rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
	else if (rdev->family >= CHIP_CEDAR)
		rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
	else
		rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits
	 * AGP - generally dma32 is safest
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
	 */
	rdev->need_dma32 = false;
	if (rdev->flags & RADEON_IS_AGP)
		rdev->need_dma32 = true;
	if ((rdev->flags & RADEON_IS_PCI) &&
	    (rdev->family <= CHIP_RS740))
		rdev->need_dma32 = true;

	dma_bits = rdev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		/* fall back to 32 bits if the wider mask is refused */
		rdev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "radeon: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "radeon: No coherent DMA available.\n");
	}

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
	spin_lock_init(&rdev->mmio_idx_lock);
	spin_lock_init(&rdev->smc_idx_lock);
	spin_lock_init(&rdev->pll_idx_lock);
	spin_lock_init(&rdev->mc_idx_lock);
	spin_lock_init(&rdev->pcie_idx_lock);
	spin_lock_init(&rdev->pciep_idx_lock);
	spin_lock_init(&rdev->pif_idx_lock);
	spin_lock_init(&rdev->cg_idx_lock);
	spin_lock_init(&rdev->uvd_idx_lock);
	spin_lock_init(&rdev->rcu_idx_lock);
	spin_lock_init(&rdev->didt_idx_lock);
	spin_lock_init(&rdev->end_idx_lock);
	/* the MMIO register BAR moved to index 5 on CIK (Bonaire+) */
	if (rdev->family >= CHIP_BONAIRE) {
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
	} else {
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
	}
	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
	if (rdev->rmmio == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
	DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);

	/* doorbell bar mapping */
	if (rdev->family >= CHIP_BONAIRE)
		radeon_doorbell_init(rdev);

	/* io port mapping: take the first I/O BAR found */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
			break;
		}
	}
	if (rdev->rio_mem == NULL)
		DRM_ERROR("Unable to find PCI I/O BAR\n");

	/* if we have > 1 VGA cards, then disable the radeon VGA resources */
	/* this will fail for cards that aren't VGA class devices, just
	 * ignore it */
	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
	vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, false);

	/* asic-specific hw/sw init */
	r = radeon_init(rdev);
	if (r)
		return r;

	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	r = radeon_gem_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("registering gem debugfs failed (%d).\n", r);
	}

	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
		/* Acceleration not working on AGP card try again
		 * with fallback to PCI or PCIE GART
		 */
		radeon_asic_reset(rdev);
		radeon_fini(rdev);
		radeon_agp_disable(rdev);
		r = radeon_init(rdev);
		if (r)
			return r;
	}
	/* optional self-tests and benchmarks, controlled by module params */
	if ((radeon_testing & 1)) {
		if (rdev->accel_working)
			radeon_test_moves(rdev);
		else
			DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
	}
	if ((radeon_testing & 2)) {
		if (rdev->accel_working)
			radeon_test_syncing(rdev);
		else
			DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
	}
	if (radeon_benchmarking) {
		if (rdev->accel_working)
			radeon_benchmark(rdev, radeon_benchmarking);
		else
			DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
	}
	return 0;
}
1341
/* defined below; needed before radeon_device_fini() */
static void radeon_debugfs_remove_files(struct radeon_device *rdev);

/**
 * radeon_device_fini - tear down the driver
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the driver info (all asics).
 * Called at driver shutdown.
 */
void radeon_device_fini(struct radeon_device *rdev)
{
	DRM_INFO("radeon: finishing device.\n");
	/* signal workers/IRQ paths that the device is going away */
	rdev->shutdown = true;
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);
	radeon_fini(rdev);
	vga_switcheroo_unregister_client(rdev->pdev);
	/* unregister by passing NULL callbacks */
	vga_client_register(rdev->pdev, NULL, NULL, NULL);
	if (rdev->rio_mem)
		pci_iounmap(rdev->pdev, rdev->rio_mem);
	rdev->rio_mem = NULL;
	iounmap(rdev->rmmio);
	rdev->rmmio = NULL;
	/* doorbell BAR only exists on CIK (Bonaire+) parts */
	if (rdev->family >= CHIP_BONAIRE)
		radeon_doorbell_fini(rdev);
	radeon_debugfs_remove_files(rdev);
}
1370

/*
 * Suspend & resume.
 */
/**
 * radeon_suspend_kms - initiate device suspend
 *
 * @dev: drm dev pointer
 * @suspend: true to also disable the PCI device and enter D3hot
 *
 * Puts the hw in the suspend state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */
int radeon_suspend_kms(struct drm_device *dev, bool suspend)
{
	struct radeon_device *rdev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	int i, r;
	bool force_completion = false;

	if (dev == NULL || dev->dev_private == NULL) {
		return -ENODEV;
	}

	rdev = dev->dev_private;

	/* already powered down by vga_switcheroo, nothing to do */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	drm_kms_helper_poll_disable(dev);

	/* turn off display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
	}

	/* unpin the front buffers */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
		struct radeon_bo *robj;

		if (rfb == NULL || rfb->obj == NULL) {
			continue;
		}
		robj = gem_to_radeon_bo(rfb->obj);
		/* don't unpin kernel fb objects */
		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				radeon_bo_unpin(robj);
				radeon_bo_unreserve(robj);
			}
		}
	}
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);

	mutex_lock(&rdev->ring_lock);
	/* wait for gpu to finish processing current batch */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		r = radeon_fence_wait_empty_locked(rdev, i);
		if (r) {
			/* delay GPU reset to resume */
			force_completion = true;
		}
	}
	if (force_completion) {
		radeon_fence_driver_force_completion(rdev);
	}
	mutex_unlock(&rdev->ring_lock);

	radeon_save_bios_scratch_regs(rdev);

	radeon_pm_suspend(rdev);
	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);
	/* evict remaining vram memory */
	radeon_bo_evict_vram(rdev);

	radeon_agp_suspend(rdev);

	pci_save_state(dev->pdev);
	if (suspend) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}
	/* console_lock serializes against fbcon while we flip fbdev state */
	console_lock();
	radeon_fbdev_set_suspend(rdev, 1);
	console_unlock();
	return 0;
}
1465
/**
 * radeon_resume_kms - initiate device resume
 *
 * @dev: drm dev pointer
 * @resume: true to re-enable the PCI device and leave D3
 *
 * Bring the hw back to operating state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver resume.
 */
int radeon_resume_kms(struct drm_device *dev, bool resume)
{
	struct drm_connector *connector;
	struct radeon_device *rdev = dev->dev_private;
	int r;

	/* powered down by vga_switcheroo, nothing to do */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	/* console_lock held across hw resume and fbdev un-suspend */
	console_lock();
	if (resume) {
		pci_set_power_state(dev->pdev, PCI_D0);
		pci_restore_state(dev->pdev);
		if (pci_enable_device(dev->pdev)) {
			console_unlock();
			return -1;
		}
	}
	/* resume AGP if in use */
	radeon_agp_resume(rdev);
	radeon_resume(rdev);

	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	radeon_pm_resume(rdev);
	radeon_restore_bios_scratch_regs(rdev);

	radeon_fbdev_set_suspend(rdev, 0);
	console_unlock();

	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);
		/* turn on the BL */
		if (rdev->mode_info.bl_encoder) {
			u8 bl_level = radeon_get_backlight_level(rdev,
								 rdev->mode_info.bl_encoder);
			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
						   bl_level);
		}
	}
	/* reset hpd state */
	radeon_hpd_init(rdev);
	/* blat the mode back in */
	drm_helper_resume_force_mode(dev);
	/* turn on display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
	}

	drm_kms_helper_poll_enable(dev);
	return 0;
}
1531
/**
 * radeon_gpu_reset - reset the asic
 *
 * @rdev: radeon device pointer
 *
 * Attempt the reset the GPU if it has hung (all asics).
 * Pending ring commands are backed up before the reset and replayed
 * afterwards; if the IB tests still fail, the reset is retried once
 * without the replay.
 * Returns 0 for success or an error on failure.
 */
int radeon_gpu_reset(struct radeon_device *rdev)
{
	unsigned ring_sizes[RADEON_NUM_RINGS];
	uint32_t *ring_data[RADEON_NUM_RINGS];

	/* true if any ring had commands worth replaying */
	bool saved = false;

	int i, r;
	int resched;

	down_write(&rdev->exclusive_lock);
	radeon_save_bios_scratch_regs(rdev);
	/* block TTM */
	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
	radeon_pm_suspend(rdev);
	radeon_suspend(rdev);

	/* back up unprocessed ring contents so they can be replayed */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
						   &ring_data[i]);
		if (ring_sizes[i]) {
			saved = true;
			dev_info(rdev->dev, "Saved %d dwords of commands "
				 "on ring %d.\n", ring_sizes[i], i);
		}
	}

retry:
	r = radeon_asic_reset(rdev);
	if (!r) {
		dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
		radeon_resume(rdev);
	}

	radeon_restore_bios_scratch_regs(rdev);

	if (!r) {
		/* replay the saved commands; radeon_ring_restore consumes
		 * ring_data, so clear our references for the retry path */
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			radeon_ring_restore(rdev, &rdev->ring[i],
					    ring_sizes[i], ring_data[i]);
			ring_sizes[i] = 0;
			ring_data[i] = NULL;
		}

		r = radeon_ib_ring_tests(rdev);
		if (r) {
			dev_err(rdev->dev, "ib ring test failed (%d).\n", r);
			if (saved) {
				/* retry once more without replaying commands */
				saved = false;
				radeon_suspend(rdev);
				goto retry;
			}
		}
	} else {
		/* reset failed: release waiters and free the backups */
		radeon_fence_driver_force_completion(rdev);
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			kfree(ring_data[i]);
		}
	}

	radeon_pm_resume(rdev);
	drm_helper_resume_force_mode(rdev->ddev);

	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
	if (r) {
		/* bad news, how to tell it to userspace ? */
		dev_info(rdev->dev, "GPU reset failed\n");
	}

	up_write(&rdev->exclusive_lock);
	return r;
}
1612

/*
 * Debugfs
 */
/**
 * radeon_debugfs_add_files - register a set of debugfs files
 *
 * @rdev: radeon_device pointer
 * @files: array of drm_info_list entries to register
 * @nfiles: number of entries in @files
 *
 * Records the file set in rdev->debugfs (deduplicated by array pointer)
 * and registers it on both the control and primary DRM minors.
 * Returns 0 on success or -EINVAL when the component table is full.
 */
int radeon_debugfs_add_files(struct radeon_device *rdev,
			     struct drm_info_list *files,
			     unsigned nfiles)
{
	unsigned i;

	for (i = 0; i < rdev->debugfs_count; i++) {
		if (rdev->debugfs[i].files == files) {
			/* Already registered */
			return 0;
		}
	}

	/* i becomes the component count after adding this entry */
	i = rdev->debugfs_count + 1;
	if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
		DRM_ERROR("Reached maximum number of debugfs components.\n");
		DRM_ERROR("Report so we increase "
			  "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
		return -EINVAL;
	}
	rdev->debugfs[rdev->debugfs_count].files = files;
	rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
	rdev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
	drm_debugfs_create_files(files, nfiles,
				 rdev->ddev->control->debugfs_root,
				 rdev->ddev->control);
	drm_debugfs_create_files(files, nfiles,
				 rdev->ddev->primary->debugfs_root,
				 rdev->ddev->primary);
#endif
	return 0;
}
1650
/**
 * radeon_debugfs_remove_files - unregister all recorded debugfs file sets
 *
 * @rdev: radeon_device pointer
 *
 * Removes every file set previously registered via
 * radeon_debugfs_add_files() from both DRM minors.
 * No-op when debugfs is not compiled in.
 */
static void radeon_debugfs_remove_files(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < rdev->debugfs_count; i++) {
		drm_debugfs_remove_files(rdev->debugfs[i].files,
					 rdev->debugfs[i].num_files,
					 rdev->ddev->control);
		drm_debugfs_remove_files(rdev->debugfs[i].files,
					 rdev->debugfs[i].num_files,
					 rdev->ddev->primary);
	}
#endif
}
1666
#if defined(CONFIG_DEBUG_FS)
/* Per-minor debugfs hooks required by the DRM core; radeon registers
 * its files through radeon_debugfs_add_files(), so these are stubs. */
int radeon_debugfs_init(struct drm_minor *minor)
{
	return 0;
}

void radeon_debugfs_cleanup(struct drm_minor *minor)
{
}
#endif
This page took 0.346244 seconds and 5 git commands to generate.