Merge branch 'drm-next' of git://people.freedesktop.org/~robclark/linux into drm...
[deliverable/linux.git] / drivers / gpu / drm / radeon / radeon_device.c
CommitLineData
771fe6b9
JG
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include <linux/console.h>
5a0e3ad6 29#include <linux/slab.h>
771fe6b9
JG
30#include <drm/drmP.h>
31#include <drm/drm_crtc_helper.h>
32#include <drm/radeon_drm.h>
28d52043 33#include <linux/vgaarb.h>
6a9ee8af 34#include <linux/vga_switcheroo.h>
bcc65fd8 35#include <linux/efi.h>
771fe6b9
JG
36#include "radeon_reg.h"
37#include "radeon.h"
771fe6b9
JG
38#include "atom.h"
39
1b5331d9
JG
/*
 * Human-readable names for each ASIC family, indexed by enum radeon_family.
 * The order must match the enum in radeon_family.h exactly - do not reorder.
 */
static const char radeon_family_name[][16] = {
	"R100",
	"RV100",
	"RS100",
	"RV200",
	"RS200",
	"R200",
	"RV250",
	"RS300",
	"RV280",
	"R300",
	"R350",
	"RV350",
	"RV380",
	"R420",
	"R423",
	"RV410",
	"RS400",
	"RS480",
	"RS600",
	"RS690",
	"RS740",
	"RV515",
	"R520",
	"RV530",
	"RV560",
	"RV570",
	"R580",
	"R600",
	"RV610",
	"RV630",
	"RV670",
	"RV620",
	"RV635",
	"RS780",
	"RS880",
	"RV770",
	"RV730",
	"RV710",
	"RV740",
	"CEDAR",
	"REDWOOD",
	"JUNIPER",
	"CYPRESS",
	"HEMLOCK",
	"PALM",
	"SUMO",
	"SUMO2",
	"BARTS",
	"TURKS",
	"CAICOS",
	"CAYMAN",
	"ARUBA",
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"LAST",
};
103
2e1b65f9
AD
104/**
105 * radeon_program_register_sequence - program an array of registers.
106 *
107 * @rdev: radeon_device pointer
108 * @registers: pointer to the register array
109 * @array_size: size of the register array
110 *
111 * Programs an array or registers with and and or masks.
112 * This is a helper for setting golden registers.
113 */
114void radeon_program_register_sequence(struct radeon_device *rdev,
115 const u32 *registers,
116 const u32 array_size)
117{
118 u32 tmp, reg, and_mask, or_mask;
119 int i;
120
121 if (array_size % 3)
122 return;
123
124 for (i = 0; i < array_size; i +=3) {
125 reg = registers[i + 0];
126 and_mask = registers[i + 1];
127 or_mask = registers[i + 2];
128
129 if (and_mask == 0xffffffff) {
130 tmp = or_mask;
131 } else {
132 tmp = RREG32(reg);
133 tmp &= ~and_mask;
134 tmp |= or_mask;
135 }
136 WREG32(reg, tmp);
137 }
138}
139
0c195119
AD
140/**
141 * radeon_surface_init - Clear GPU surface registers.
142 *
143 * @rdev: radeon_device pointer
144 *
145 * Clear GPU surface registers (r1xx-r5xx).
b1e3a6d1 146 */
3ce0a23d 147void radeon_surface_init(struct radeon_device *rdev)
b1e3a6d1
MD
148{
149 /* FIXME: check this out */
150 if (rdev->family < CHIP_R600) {
151 int i;
152
550e2d92
DA
153 for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
154 if (rdev->surface_regs[i].bo)
155 radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
156 else
157 radeon_clear_surface_reg(rdev, i);
b1e3a6d1 158 }
e024e110
DA
159 /* enable surfaces */
160 WREG32(RADEON_SURFACE_CNTL, 0);
b1e3a6d1
MD
161 }
162}
163
771fe6b9
JG
164/*
165 * GPU scratch registers helpers function.
166 */
0c195119
AD
167/**
168 * radeon_scratch_init - Init scratch register driver information.
169 *
170 * @rdev: radeon_device pointer
171 *
172 * Init CP scratch register driver information (r1xx-r5xx)
173 */
3ce0a23d 174void radeon_scratch_init(struct radeon_device *rdev)
771fe6b9
JG
175{
176 int i;
177
178 /* FIXME: check this out */
179 if (rdev->family < CHIP_R300) {
180 rdev->scratch.num_reg = 5;
181 } else {
182 rdev->scratch.num_reg = 7;
183 }
724c80e1 184 rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
771fe6b9
JG
185 for (i = 0; i < rdev->scratch.num_reg; i++) {
186 rdev->scratch.free[i] = true;
724c80e1 187 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
771fe6b9
JG
188 }
189}
190
0c195119
AD
191/**
192 * radeon_scratch_get - Allocate a scratch register
193 *
194 * @rdev: radeon_device pointer
195 * @reg: scratch register mmio offset
196 *
197 * Allocate a CP scratch register for use by the driver (all asics).
198 * Returns 0 on success or -EINVAL on failure.
199 */
771fe6b9
JG
200int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
201{
202 int i;
203
204 for (i = 0; i < rdev->scratch.num_reg; i++) {
205 if (rdev->scratch.free[i]) {
206 rdev->scratch.free[i] = false;
207 *reg = rdev->scratch.reg[i];
208 return 0;
209 }
210 }
211 return -EINVAL;
212}
213
0c195119
AD
214/**
215 * radeon_scratch_free - Free a scratch register
216 *
217 * @rdev: radeon_device pointer
218 * @reg: scratch register mmio offset
219 *
220 * Free a CP scratch register allocated for use by the driver (all asics)
221 */
771fe6b9
JG
222void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
223{
224 int i;
225
226 for (i = 0; i < rdev->scratch.num_reg; i++) {
227 if (rdev->scratch.reg[i] == reg) {
228 rdev->scratch.free[i] = true;
229 return;
230 }
231 }
232}
233
75efdee1
AD
234/*
235 * GPU doorbell aperture helpers function.
236 */
237/**
238 * radeon_doorbell_init - Init doorbell driver information.
239 *
240 * @rdev: radeon_device pointer
241 *
242 * Init doorbell driver information (CIK)
243 * Returns 0 on success, error on failure.
244 */
245int radeon_doorbell_init(struct radeon_device *rdev)
246{
247 int i;
248
249 /* doorbell bar mapping */
250 rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
251 rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);
252
253 /* limit to 4 MB for now */
254 if (rdev->doorbell.size > (4 * 1024 * 1024))
255 rdev->doorbell.size = 4 * 1024 * 1024;
256
257 rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.size);
258 if (rdev->doorbell.ptr == NULL) {
259 return -ENOMEM;
260 }
261 DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
262 DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);
263
264 rdev->doorbell.num_pages = rdev->doorbell.size / PAGE_SIZE;
265
266 for (i = 0; i < rdev->doorbell.num_pages; i++) {
267 rdev->doorbell.free[i] = true;
268 }
269 return 0;
270}
271
272/**
273 * radeon_doorbell_fini - Tear down doorbell driver information.
274 *
275 * @rdev: radeon_device pointer
276 *
277 * Tear down doorbell driver information (CIK)
278 */
279void radeon_doorbell_fini(struct radeon_device *rdev)
280{
281 iounmap(rdev->doorbell.ptr);
282 rdev->doorbell.ptr = NULL;
283}
284
285/**
286 * radeon_doorbell_get - Allocate a doorbell page
287 *
288 * @rdev: radeon_device pointer
289 * @doorbell: doorbell page number
290 *
291 * Allocate a doorbell page for use by the driver (all asics).
292 * Returns 0 on success or -EINVAL on failure.
293 */
294int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
295{
296 int i;
297
298 for (i = 0; i < rdev->doorbell.num_pages; i++) {
299 if (rdev->doorbell.free[i]) {
300 rdev->doorbell.free[i] = false;
301 *doorbell = i;
302 return 0;
303 }
304 }
305 return -EINVAL;
306}
307
308/**
309 * radeon_doorbell_free - Free a doorbell page
310 *
311 * @rdev: radeon_device pointer
312 * @doorbell: doorbell page number
313 *
314 * Free a doorbell page allocated for use by the driver (all asics)
315 */
316void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
317{
318 if (doorbell < rdev->doorbell.num_pages)
319 rdev->doorbell.free[doorbell] = true;
320}
321
0c195119
AD
322/*
323 * radeon_wb_*()
324 * Writeback is the the method by which the the GPU updates special pages
325 * in memory with the status of certain GPU events (fences, ring pointers,
326 * etc.).
327 */
328
329/**
330 * radeon_wb_disable - Disable Writeback
331 *
332 * @rdev: radeon_device pointer
333 *
334 * Disables Writeback (all asics). Used for suspend.
335 */
724c80e1
AD
336void radeon_wb_disable(struct radeon_device *rdev)
337{
724c80e1
AD
338 rdev->wb.enabled = false;
339}
340
0c195119
AD
341/**
342 * radeon_wb_fini - Disable Writeback and free memory
343 *
344 * @rdev: radeon_device pointer
345 *
346 * Disables Writeback and frees the Writeback memory (all asics).
347 * Used at driver shutdown.
348 */
724c80e1
AD
349void radeon_wb_fini(struct radeon_device *rdev)
350{
351 radeon_wb_disable(rdev);
352 if (rdev->wb.wb_obj) {
089920f2
JG
353 if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
354 radeon_bo_kunmap(rdev->wb.wb_obj);
355 radeon_bo_unpin(rdev->wb.wb_obj);
356 radeon_bo_unreserve(rdev->wb.wb_obj);
357 }
724c80e1
AD
358 radeon_bo_unref(&rdev->wb.wb_obj);
359 rdev->wb.wb = NULL;
360 rdev->wb.wb_obj = NULL;
361 }
362}
363
0c195119
AD
364/**
365 * radeon_wb_init- Init Writeback driver info and allocate memory
366 *
367 * @rdev: radeon_device pointer
368 *
369 * Disables Writeback and frees the Writeback memory (all asics).
370 * Used at driver startup.
371 * Returns 0 on success or an -error on failure.
372 */
724c80e1
AD
373int radeon_wb_init(struct radeon_device *rdev)
374{
375 int r;
376
377 if (rdev->wb.wb_obj == NULL) {
441921d5 378 r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
40f5cf99 379 RADEON_GEM_DOMAIN_GTT, NULL, &rdev->wb.wb_obj);
724c80e1
AD
380 if (r) {
381 dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
382 return r;
383 }
089920f2
JG
384 r = radeon_bo_reserve(rdev->wb.wb_obj, false);
385 if (unlikely(r != 0)) {
386 radeon_wb_fini(rdev);
387 return r;
388 }
389 r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
390 &rdev->wb.gpu_addr);
391 if (r) {
392 radeon_bo_unreserve(rdev->wb.wb_obj);
393 dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
394 radeon_wb_fini(rdev);
395 return r;
396 }
397 r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
724c80e1 398 radeon_bo_unreserve(rdev->wb.wb_obj);
089920f2
JG
399 if (r) {
400 dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
401 radeon_wb_fini(rdev);
402 return r;
403 }
724c80e1
AD
404 }
405
e6ba7599
AD
406 /* clear wb memory */
407 memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
d0f8a854
AD
408 /* disable event_write fences */
409 rdev->wb.use_event = false;
724c80e1 410 /* disabled via module param */
3b7a2b24 411 if (radeon_no_wb == 1) {
724c80e1 412 rdev->wb.enabled = false;
3b7a2b24 413 } else {
724c80e1 414 if (rdev->flags & RADEON_IS_AGP) {
28eebb70
AD
415 /* often unreliable on AGP */
416 rdev->wb.enabled = false;
417 } else if (rdev->family < CHIP_R300) {
418 /* often unreliable on pre-r300 */
724c80e1 419 rdev->wb.enabled = false;
d0f8a854 420 } else {
724c80e1 421 rdev->wb.enabled = true;
d0f8a854 422 /* event_write fences are only available on r600+ */
3b7a2b24 423 if (rdev->family >= CHIP_R600) {
d0f8a854 424 rdev->wb.use_event = true;
3b7a2b24 425 }
d0f8a854 426 }
724c80e1 427 }
c994ead6
AD
428 /* always use writeback/events on NI, APUs */
429 if (rdev->family >= CHIP_PALM) {
7d52785d
AD
430 rdev->wb.enabled = true;
431 rdev->wb.use_event = true;
432 }
724c80e1
AD
433
434 dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");
435
436 return 0;
437}
438
d594e46a
JG
439/**
440 * radeon_vram_location - try to find VRAM location
441 * @rdev: radeon device structure holding all necessary informations
442 * @mc: memory controller structure holding memory informations
443 * @base: base address at which to put VRAM
444 *
445 * Function will place try to place VRAM at base address provided
446 * as parameter (which is so far either PCI aperture address or
447 * for IGP TOM base address).
448 *
449 * If there is not enough space to fit the unvisible VRAM in the 32bits
450 * address space then we limit the VRAM size to the aperture.
451 *
452 * If we are using AGP and if the AGP aperture doesn't allow us to have
453 * room for all the VRAM than we restrict the VRAM to the PCI aperture
454 * size and print a warning.
455 *
456 * This function will never fails, worst case are limiting VRAM.
457 *
458 * Note: GTT start, end, size should be initialized before calling this
459 * function on AGP platform.
460 *
25985edc 461 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
d594e46a
JG
462 * this shouldn't be a problem as we are using the PCI aperture as a reference.
463 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
464 * not IGP.
465 *
466 * Note: we use mc_vram_size as on some board we need to program the mc to
467 * cover the whole aperture even if VRAM size is inferior to aperture size
468 * Novell bug 204882 + along with lots of ubuntu ones
469 *
470 * Note: when limiting vram it's safe to overwritte real_vram_size because
471 * we are not in case where real_vram_size is inferior to mc_vram_size (ie
472 * note afected by bogus hw of Novell bug 204882 + along with lots of ubuntu
473 * ones)
474 *
475 * Note: IGP TOM addr should be the same as the aperture addr, we don't
476 * explicitly check for that thought.
477 *
478 * FIXME: when reducing VRAM size align new size on power of 2.
771fe6b9 479 */
d594e46a 480void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
771fe6b9 481{
1bcb04f7
CK
482 uint64_t limit = (uint64_t)radeon_vram_limit << 20;
483
d594e46a 484 mc->vram_start = base;
9ed8b1f9 485 if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
d594e46a
JG
486 dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
487 mc->real_vram_size = mc->aper_size;
488 mc->mc_vram_size = mc->aper_size;
489 }
490 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
2cbeb4ef 491 if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
d594e46a
JG
492 dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
493 mc->real_vram_size = mc->aper_size;
494 mc->mc_vram_size = mc->aper_size;
495 }
496 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
1bcb04f7
CK
497 if (limit && limit < mc->real_vram_size)
498 mc->real_vram_size = limit;
dd7cc55a 499 dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
d594e46a
JG
500 mc->mc_vram_size >> 20, mc->vram_start,
501 mc->vram_end, mc->real_vram_size >> 20);
502}
771fe6b9 503
d594e46a
JG
504/**
505 * radeon_gtt_location - try to find GTT location
506 * @rdev: radeon device structure holding all necessary informations
507 * @mc: memory controller structure holding memory informations
508 *
509 * Function will place try to place GTT before or after VRAM.
510 *
511 * If GTT size is bigger than space left then we ajust GTT size.
512 * Thus function will never fails.
513 *
514 * FIXME: when reducing GTT size align new size on power of 2.
515 */
516void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
517{
518 u64 size_af, size_bf;
519
9ed8b1f9 520 size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
8d369bb1 521 size_bf = mc->vram_start & ~mc->gtt_base_align;
d594e46a
JG
522 if (size_bf > size_af) {
523 if (mc->gtt_size > size_bf) {
524 dev_warn(rdev->dev, "limiting GTT\n");
525 mc->gtt_size = size_bf;
771fe6b9 526 }
8d369bb1 527 mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
771fe6b9 528 } else {
d594e46a
JG
529 if (mc->gtt_size > size_af) {
530 dev_warn(rdev->dev, "limiting GTT\n");
531 mc->gtt_size = size_af;
532 }
8d369bb1 533 mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
771fe6b9 534 }
d594e46a 535 mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
dd7cc55a 536 dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
d594e46a 537 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
771fe6b9
JG
538}
539
771fe6b9
JG
540/*
541 * GPU helpers function.
542 */
0c195119
AD
543/**
544 * radeon_card_posted - check if the hw has already been initialized
545 *
546 * @rdev: radeon_device pointer
547 *
548 * Check if the asic has been initialized (all asics).
549 * Used at driver startup.
550 * Returns true if initialized or false if not.
551 */
9f022ddf 552bool radeon_card_posted(struct radeon_device *rdev)
771fe6b9
JG
553{
554 uint32_t reg;
555
50a583f6 556 /* required for EFI mode on macbook2,1 which uses an r5xx asic */
83e68189 557 if (efi_enabled(EFI_BOOT) &&
50a583f6
AD
558 (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
559 (rdev->family < CHIP_R600))
bcc65fd8
MG
560 return false;
561
2cf3a4fc
AD
562 if (ASIC_IS_NODCE(rdev))
563 goto check_memsize;
564
771fe6b9 565 /* first check CRTCs */
09fb8bd1 566 if (ASIC_IS_DCE4(rdev)) {
18007401
AD
567 reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
568 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
09fb8bd1
AD
569 if (rdev->num_crtc >= 4) {
570 reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
571 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
572 }
573 if (rdev->num_crtc >= 6) {
574 reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
575 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
576 }
bcc1c2a1
AD
577 if (reg & EVERGREEN_CRTC_MASTER_EN)
578 return true;
579 } else if (ASIC_IS_AVIVO(rdev)) {
771fe6b9
JG
580 reg = RREG32(AVIVO_D1CRTC_CONTROL) |
581 RREG32(AVIVO_D2CRTC_CONTROL);
582 if (reg & AVIVO_CRTC_EN) {
583 return true;
584 }
585 } else {
586 reg = RREG32(RADEON_CRTC_GEN_CNTL) |
587 RREG32(RADEON_CRTC2_GEN_CNTL);
588 if (reg & RADEON_CRTC_EN) {
589 return true;
590 }
591 }
592
2cf3a4fc 593check_memsize:
771fe6b9
JG
594 /* then check MEM_SIZE, in case the crtcs are off */
595 if (rdev->family >= CHIP_R600)
596 reg = RREG32(R600_CONFIG_MEMSIZE);
597 else
598 reg = RREG32(RADEON_CONFIG_MEMSIZE);
599
600 if (reg)
601 return true;
602
603 return false;
604
605}
606
0c195119
AD
607/**
608 * radeon_update_bandwidth_info - update display bandwidth params
609 *
610 * @rdev: radeon_device pointer
611 *
612 * Used when sclk/mclk are switched or display modes are set.
613 * params are used to calculate display watermarks (all asics)
614 */
f47299c5
AD
615void radeon_update_bandwidth_info(struct radeon_device *rdev)
616{
617 fixed20_12 a;
8807286e
AD
618 u32 sclk = rdev->pm.current_sclk;
619 u32 mclk = rdev->pm.current_mclk;
f47299c5 620
8807286e
AD
621 /* sclk/mclk in Mhz */
622 a.full = dfixed_const(100);
623 rdev->pm.sclk.full = dfixed_const(sclk);
624 rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
625 rdev->pm.mclk.full = dfixed_const(mclk);
626 rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
f47299c5 627
8807286e 628 if (rdev->flags & RADEON_IS_IGP) {
68adac5e 629 a.full = dfixed_const(16);
f47299c5 630 /* core_bandwidth = sclk(Mhz) * 16 */
68adac5e 631 rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
f47299c5
AD
632 }
633}
634
0c195119
AD
635/**
636 * radeon_boot_test_post_card - check and possibly initialize the hw
637 *
638 * @rdev: radeon_device pointer
639 *
640 * Check if the asic is initialized and if not, attempt to initialize
641 * it (all asics).
642 * Returns true if initialized or false if not.
643 */
72542d77
DA
644bool radeon_boot_test_post_card(struct radeon_device *rdev)
645{
646 if (radeon_card_posted(rdev))
647 return true;
648
649 if (rdev->bios) {
650 DRM_INFO("GPU not posted. posting now...\n");
651 if (rdev->is_atom_bios)
652 atom_asic_init(rdev->mode_info.atom_context);
653 else
654 radeon_combios_asic_init(rdev->ddev);
655 return true;
656 } else {
657 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
658 return false;
659 }
660}
661
0c195119
AD
662/**
663 * radeon_dummy_page_init - init dummy page used by the driver
664 *
665 * @rdev: radeon_device pointer
666 *
667 * Allocate the dummy page used by the driver (all asics).
668 * This dummy page is used by the driver as a filler for gart entries
669 * when pages are taken out of the GART
670 * Returns 0 on sucess, -ENOMEM on failure.
671 */
3ce0a23d
JG
672int radeon_dummy_page_init(struct radeon_device *rdev)
673{
82568565
DA
674 if (rdev->dummy_page.page)
675 return 0;
3ce0a23d
JG
676 rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
677 if (rdev->dummy_page.page == NULL)
678 return -ENOMEM;
679 rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
680 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
a30f6fb7
BH
681 if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
682 dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
3ce0a23d
JG
683 __free_page(rdev->dummy_page.page);
684 rdev->dummy_page.page = NULL;
685 return -ENOMEM;
686 }
687 return 0;
688}
689
0c195119
AD
690/**
691 * radeon_dummy_page_fini - free dummy page used by the driver
692 *
693 * @rdev: radeon_device pointer
694 *
695 * Frees the dummy page used by the driver (all asics).
696 */
3ce0a23d
JG
697void radeon_dummy_page_fini(struct radeon_device *rdev)
698{
699 if (rdev->dummy_page.page == NULL)
700 return;
701 pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
702 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
703 __free_page(rdev->dummy_page.page);
704 rdev->dummy_page.page = NULL;
705}
706
771fe6b9 707
771fe6b9 708/* ATOM accessor methods */
0c195119
AD
709/*
710 * ATOM is an interpreted byte code stored in tables in the vbios. The
711 * driver registers callbacks to access registers and the interpreter
712 * in the driver parses the tables and executes then to program specific
713 * actions (set display modes, asic init, etc.). See radeon_atombios.c,
714 * atombios.h, and atom.c
715 */
716
717/**
718 * cail_pll_read - read PLL register
719 *
720 * @info: atom card_info pointer
721 * @reg: PLL register offset
722 *
723 * Provides a PLL register accessor for the atom interpreter (r4xx+).
724 * Returns the value of the PLL register.
725 */
771fe6b9
JG
726static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
727{
728 struct radeon_device *rdev = info->dev->dev_private;
729 uint32_t r;
730
731 r = rdev->pll_rreg(rdev, reg);
732 return r;
733}
734
0c195119
AD
735/**
736 * cail_pll_write - write PLL register
737 *
738 * @info: atom card_info pointer
739 * @reg: PLL register offset
740 * @val: value to write to the pll register
741 *
742 * Provides a PLL register accessor for the atom interpreter (r4xx+).
743 */
771fe6b9
JG
744static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
745{
746 struct radeon_device *rdev = info->dev->dev_private;
747
748 rdev->pll_wreg(rdev, reg, val);
749}
750
0c195119
AD
751/**
752 * cail_mc_read - read MC (Memory Controller) register
753 *
754 * @info: atom card_info pointer
755 * @reg: MC register offset
756 *
757 * Provides an MC register accessor for the atom interpreter (r4xx+).
758 * Returns the value of the MC register.
759 */
771fe6b9
JG
760static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
761{
762 struct radeon_device *rdev = info->dev->dev_private;
763 uint32_t r;
764
765 r = rdev->mc_rreg(rdev, reg);
766 return r;
767}
768
0c195119
AD
769/**
770 * cail_mc_write - write MC (Memory Controller) register
771 *
772 * @info: atom card_info pointer
773 * @reg: MC register offset
774 * @val: value to write to the pll register
775 *
776 * Provides a MC register accessor for the atom interpreter (r4xx+).
777 */
771fe6b9
JG
778static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
779{
780 struct radeon_device *rdev = info->dev->dev_private;
781
782 rdev->mc_wreg(rdev, reg, val);
783}
784
0c195119
AD
785/**
786 * cail_reg_write - write MMIO register
787 *
788 * @info: atom card_info pointer
789 * @reg: MMIO register offset
790 * @val: value to write to the pll register
791 *
792 * Provides a MMIO register accessor for the atom interpreter (r4xx+).
793 */
771fe6b9
JG
794static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
795{
796 struct radeon_device *rdev = info->dev->dev_private;
797
798 WREG32(reg*4, val);
799}
800
0c195119
AD
801/**
802 * cail_reg_read - read MMIO register
803 *
804 * @info: atom card_info pointer
805 * @reg: MMIO register offset
806 *
807 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
808 * Returns the value of the MMIO register.
809 */
771fe6b9
JG
810static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
811{
812 struct radeon_device *rdev = info->dev->dev_private;
813 uint32_t r;
814
815 r = RREG32(reg*4);
816 return r;
817}
818
0c195119
AD
819/**
820 * cail_ioreg_write - write IO register
821 *
822 * @info: atom card_info pointer
823 * @reg: IO register offset
824 * @val: value to write to the pll register
825 *
826 * Provides a IO register accessor for the atom interpreter (r4xx+).
827 */
351a52a2
AD
828static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
829{
830 struct radeon_device *rdev = info->dev->dev_private;
831
832 WREG32_IO(reg*4, val);
833}
834
0c195119
AD
835/**
836 * cail_ioreg_read - read IO register
837 *
838 * @info: atom card_info pointer
839 * @reg: IO register offset
840 *
841 * Provides an IO register accessor for the atom interpreter (r4xx+).
842 * Returns the value of the IO register.
843 */
351a52a2
AD
844static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
845{
846 struct radeon_device *rdev = info->dev->dev_private;
847 uint32_t r;
848
849 r = RREG32_IO(reg*4);
850 return r;
851}
852
0c195119
AD
853/**
854 * radeon_atombios_init - init the driver info and callbacks for atombios
855 *
856 * @rdev: radeon_device pointer
857 *
858 * Initializes the driver info and register access callbacks for the
859 * ATOM interpreter (r4xx+).
860 * Returns 0 on sucess, -ENOMEM on failure.
861 * Called at driver startup.
862 */
771fe6b9
JG
863int radeon_atombios_init(struct radeon_device *rdev)
864{
61c4b24b
MF
865 struct card_info *atom_card_info =
866 kzalloc(sizeof(struct card_info), GFP_KERNEL);
867
868 if (!atom_card_info)
869 return -ENOMEM;
870
871 rdev->mode_info.atom_card_info = atom_card_info;
872 atom_card_info->dev = rdev->ddev;
873 atom_card_info->reg_read = cail_reg_read;
874 atom_card_info->reg_write = cail_reg_write;
351a52a2
AD
875 /* needed for iio ops */
876 if (rdev->rio_mem) {
877 atom_card_info->ioreg_read = cail_ioreg_read;
878 atom_card_info->ioreg_write = cail_ioreg_write;
879 } else {
880 DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
881 atom_card_info->ioreg_read = cail_reg_read;
882 atom_card_info->ioreg_write = cail_reg_write;
883 }
61c4b24b
MF
884 atom_card_info->mc_read = cail_mc_read;
885 atom_card_info->mc_write = cail_mc_write;
886 atom_card_info->pll_read = cail_pll_read;
887 atom_card_info->pll_write = cail_pll_write;
888
889 rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
0e34d094
TG
890 if (!rdev->mode_info.atom_context) {
891 radeon_atombios_fini(rdev);
892 return -ENOMEM;
893 }
894
c31ad97f 895 mutex_init(&rdev->mode_info.atom_context->mutex);
771fe6b9 896 radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
d904ef9b 897 atom_allocate_fb_scratch(rdev->mode_info.atom_context);
771fe6b9
JG
898 return 0;
899}
900
0c195119
AD
901/**
902 * radeon_atombios_fini - free the driver info and callbacks for atombios
903 *
904 * @rdev: radeon_device pointer
905 *
906 * Frees the driver info and register access callbacks for the ATOM
907 * interpreter (r4xx+).
908 * Called at driver shutdown.
909 */
771fe6b9
JG
910void radeon_atombios_fini(struct radeon_device *rdev)
911{
4a04a844
JG
912 if (rdev->mode_info.atom_context) {
913 kfree(rdev->mode_info.atom_context->scratch);
4a04a844 914 }
0e34d094
TG
915 kfree(rdev->mode_info.atom_context);
916 rdev->mode_info.atom_context = NULL;
61c4b24b 917 kfree(rdev->mode_info.atom_card_info);
0e34d094 918 rdev->mode_info.atom_card_info = NULL;
771fe6b9
JG
919}
920
0c195119
AD
921/* COMBIOS */
922/*
923 * COMBIOS is the bios format prior to ATOM. It provides
924 * command tables similar to ATOM, but doesn't have a unified
925 * parser. See radeon_combios.c
926 */
927
928/**
929 * radeon_combios_init - init the driver info for combios
930 *
931 * @rdev: radeon_device pointer
932 *
933 * Initializes the driver info for combios (r1xx-r3xx).
934 * Returns 0 on sucess.
935 * Called at driver startup.
936 */
771fe6b9
JG
937int radeon_combios_init(struct radeon_device *rdev)
938{
939 radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
940 return 0;
941}
942
/**
 * radeon_combios_fini - free the driver info for combios
 *
 * @rdev: radeon_device pointer
 *
 * Frees the driver info for combios (r1xx-r3xx).
 * Called at driver shutdown.  Intentionally a no-op: combios init
 * allocates no state that would need tearing down.
 */
void radeon_combios_fini(struct radeon_device *rdev)
{
}
954
0c195119
AD
955/* if we get transitioned to only one device, take VGA back */
956/**
957 * radeon_vga_set_decode - enable/disable vga decode
958 *
959 * @cookie: radeon_device pointer
960 * @state: enable/disable vga decode
961 *
962 * Enable/disable vga decode (all asics).
963 * Returns VGA resource flags.
964 */
28d52043
DA
965static unsigned int radeon_vga_set_decode(void *cookie, bool state)
966{
967 struct radeon_device *rdev = cookie;
28d52043
DA
968 radeon_vga_set_state(rdev, state);
969 if (state)
970 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
971 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
972 else
973 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
974}
c1176d6f 975
/**
 * radeon_check_pot_argument - check that argument is a power of two
 *
 * @arg: value to check
 *
 * Validates that a certain argument is a power of two (all asics).
 * Zero is accepted as well (clearing n's lowest set bit via n & (n - 1)
 * yields zero only for powers of two and for zero itself).
 * Returns true if argument is valid.
 */
static bool radeon_check_pot_argument(int arg)
{
	return !(arg & (arg - 1));
}
988
0c195119
AD
989/**
990 * radeon_check_arguments - validate module params
991 *
992 * @rdev: radeon_device pointer
993 *
994 * Validates certain module parameters and updates
995 * the associated values used by the driver (all asics).
996 */
1109ca09 997static void radeon_check_arguments(struct radeon_device *rdev)
36421338
JG
998{
999 /* vramlimit must be a power of two */
1bcb04f7 1000 if (!radeon_check_pot_argument(radeon_vram_limit)) {
36421338
JG
1001 dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
1002 radeon_vram_limit);
1003 radeon_vram_limit = 0;
36421338 1004 }
1bcb04f7 1005
36421338 1006 /* gtt size must be power of two and greater or equal to 32M */
1bcb04f7 1007 if (radeon_gart_size < 32) {
36421338
JG
1008 dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
1009 radeon_gart_size);
1010 radeon_gart_size = 512;
1bcb04f7
CK
1011
1012 } else if (!radeon_check_pot_argument(radeon_gart_size)) {
36421338
JG
1013 dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
1014 radeon_gart_size);
1015 radeon_gart_size = 512;
36421338 1016 }
1bcb04f7
CK
1017 rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
1018
36421338
JG
1019 /* AGP mode can only be -1, 1, 2, 4, 8 */
1020 switch (radeon_agpmode) {
1021 case -1:
1022 case 0:
1023 case 1:
1024 case 2:
1025 case 4:
1026 case 8:
1027 break;
1028 default:
1029 dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
1030 "-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
1031 radeon_agpmode = 0;
1032 break;
1033 }
1034}
1035
d1f9809e
ML
1036/**
1037 * radeon_switcheroo_quirk_long_wakeup - return true if longer d3 delay is
1038 * needed for waking up.
1039 *
1040 * @pdev: pci dev pointer
1041 */
1042static bool radeon_switcheroo_quirk_long_wakeup(struct pci_dev *pdev)
1043{
1044
1045 /* 6600m in a macbook pro */
1046 if (pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
1047 pdev->subsystem_device == 0x00e2) {
1048 printk(KERN_INFO "radeon: quirking longer d3 wakeup delay\n");
1049 return true;
1050 }
1051
1052 return false;
1053}
1054
0c195119
AD
1055/**
1056 * radeon_switcheroo_set_state - set switcheroo state
1057 *
1058 * @pdev: pci dev pointer
1059 * @state: vga switcheroo state
1060 *
1061 * Callback for the switcheroo driver. Suspends or resumes the
1062 * the asics before or after it is powered up using ACPI methods.
1063 */
6a9ee8af
DA
1064static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
1065{
1066 struct drm_device *dev = pci_get_drvdata(pdev);
6a9ee8af
DA
1067 pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
1068 if (state == VGA_SWITCHEROO_ON) {
d1f9809e
ML
1069 unsigned d3_delay = dev->pdev->d3_delay;
1070
6a9ee8af
DA
1071 printk(KERN_INFO "radeon: switched on\n");
1072 /* don't suspend or resume card normally */
5bcf719b 1073 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
d1f9809e
ML
1074
1075 if (d3_delay < 20 && radeon_switcheroo_quirk_long_wakeup(pdev))
1076 dev->pdev->d3_delay = 20;
1077
6a9ee8af 1078 radeon_resume_kms(dev);
d1f9809e
ML
1079
1080 dev->pdev->d3_delay = d3_delay;
1081
5bcf719b 1082 dev->switch_power_state = DRM_SWITCH_POWER_ON;
fbf81762 1083 drm_kms_helper_poll_enable(dev);
6a9ee8af
DA
1084 } else {
1085 printk(KERN_INFO "radeon: switched off\n");
fbf81762 1086 drm_kms_helper_poll_disable(dev);
5bcf719b 1087 dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
6a9ee8af 1088 radeon_suspend_kms(dev, pmm);
5bcf719b 1089 dev->switch_power_state = DRM_SWITCH_POWER_OFF;
6a9ee8af
DA
1090 }
1091}
1092
0c195119
AD
1093/**
1094 * radeon_switcheroo_can_switch - see if switcheroo state can change
1095 *
1096 * @pdev: pci dev pointer
1097 *
1098 * Callback for the switcheroo driver. Check of the switcheroo
1099 * state can be changed.
1100 * Returns true if the state can be changed, false if not.
1101 */
6a9ee8af
DA
1102static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
1103{
1104 struct drm_device *dev = pci_get_drvdata(pdev);
1105 bool can_switch;
1106
1107 spin_lock(&dev->count_lock);
1108 can_switch = (dev->open_count == 0);
1109 spin_unlock(&dev->count_lock);
1110 return can_switch;
1111}
1112
26ec685f
TI
/* vga_switcheroo callbacks for this driver; no reprobe hook is needed */
static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
	.set_gpu_state = radeon_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = radeon_switcheroo_can_switch,
};
6a9ee8af 1118
0c195119
AD
/**
 * radeon_device_init - initialize the driver
 *
 * @rdev: radeon_device pointer
 * @ddev: drm dev pointer
 * @pdev: pci dev pointer
 * @flags: driver flags
 *
 * Initializes the driver info and hw (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver startup.
 */
int radeon_device_init(struct radeon_device *rdev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r, i;
	int dma_bits;

	rdev->shutdown = false;
	rdev->dev = &pdev->dev;
	rdev->ddev = ddev;
	rdev->pdev = pdev;
	rdev->flags = flags;
	rdev->family = flags & RADEON_FAMILY_MASK;
	rdev->is_atom_bios = false;
	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
	rdev->accel_working = false;
	/* set up ring ids */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		rdev->ring[i].idx = i;
	}

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
		radeon_family_name[rdev->family], pdev->vendor, pdev->device,
		pdev->subsystem_vendor, pdev->subsystem_device);

	/* mutex initialization are all done here so we
	 * can recall function without having locking issues */
	mutex_init(&rdev->ring_lock);
	mutex_init(&rdev->dc_hw_i2c_mutex);
	atomic_set(&rdev->ih.lock, 0);
	mutex_init(&rdev->gem.mutex);
	mutex_init(&rdev->pm.mutex);
	mutex_init(&rdev->gpu_clock_mutex);
	init_rwsem(&rdev->pm.mclk_lock);
	init_rwsem(&rdev->exclusive_lock);
	init_waitqueue_head(&rdev->irq.vblank_queue);
	r = radeon_gem_init(rdev);
	if (r)
		return r;
	/* initialize vm here */
	mutex_init(&rdev->vm_manager.lock);
	/* Adjust VM size here.
	 * Currently set to 4GB ((1 << 20) 4k pages).
	 * Max GPUVM size for cayman and SI is 40 bits.
	 */
	rdev->vm_manager.max_pfn = 1 << 20;
	INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);

	/* Set asic functions */
	r = radeon_asic_init(rdev);
	if (r)
		return r;
	/* validate module params only after the asic is known */
	radeon_check_arguments(rdev);

	/* all of the newer IGP chips have an internal gart
	 * However some rs4xx report as AGP, so remove that here.
	 */
	if ((rdev->family >= CHIP_RS400) &&
	    (rdev->flags & RADEON_IS_IGP)) {
		rdev->flags &= ~RADEON_IS_AGP;
	}

	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
		radeon_agp_disable(rdev);
	}

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	if (rdev->family >= CHIP_CAYMAN)
		rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
	else if (rdev->family >= CHIP_CEDAR)
		rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
	else
		rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits
	 * AGP - generally dma32 is safest
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
	 */
	rdev->need_dma32 = false;
	if (rdev->flags & RADEON_IS_AGP)
		rdev->need_dma32 = true;
	if ((rdev->flags & RADEON_IS_PCI) &&
	    (rdev->family <= CHIP_RS740))
		rdev->need_dma32 = true;

	dma_bits = rdev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		/* 40-bit mask rejected: fall back to 32 bits */
		rdev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "radeon: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "radeon: No coherent DMA available.\n");
	}

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
	spin_lock_init(&rdev->mmio_idx_lock);
	/* CIK (BONAIRE+) parts moved the register BAR from 2 to 5 */
	if (rdev->family >= CHIP_BONAIRE) {
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
	} else {
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
	}
	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
	if (rdev->rmmio == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
	DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);

	/* doorbell bar mapping */
	if (rdev->family >= CHIP_BONAIRE)
		radeon_doorbell_init(rdev);

	/* io port mapping: use the first I/O BAR found */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
			break;
		}
	}
	if (rdev->rio_mem == NULL)
		DRM_ERROR("Unable to find PCI I/O BAR\n");

	/* if we have > 1 VGA cards, then disable the radeon VGA resources */
	/* this will fail for cards that aren't VGA class devices, just
	 * ignore it */
	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
	vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops);

	r = radeon_init(rdev);
	if (r)
		return r;

	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	r = radeon_gem_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("registering gem debugfs failed (%d).\n", r);
	}

	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
		/* Acceleration not working on AGP card try again
		 * with fallback to PCI or PCIE GART
		 */
		radeon_asic_reset(rdev);
		radeon_fini(rdev);
		radeon_agp_disable(rdev);
		r = radeon_init(rdev);
		if (r)
			return r;
	}
	/* optional self-tests/benchmarks controlled by module params */
	if ((radeon_testing & 1)) {
		radeon_test_moves(rdev);
	}
	if ((radeon_testing & 2)) {
		radeon_test_syncing(rdev);
	}
	if (radeon_benchmarking) {
		radeon_benchmark(rdev, radeon_benchmarking);
	}
	return 0;
}
1309
4d8bf9ae
CK
1310static void radeon_debugfs_remove_files(struct radeon_device *rdev);
1311
0c195119
AD
/**
 * radeon_device_fini - tear down the driver
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the driver info (all asics).
 * Called at driver shutdown.
 */
void radeon_device_fini(struct radeon_device *rdev)
{
	DRM_INFO("radeon: finishing device.\n");
	rdev->shutdown = true;
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);
	radeon_fini(rdev);
	/* unregister from switcheroo and drop the vga arbiter client */
	vga_switcheroo_unregister_client(rdev->pdev);
	vga_client_register(rdev->pdev, NULL, NULL, NULL);
	/* unmap the I/O and MMIO BARs (reverse of radeon_device_init) */
	if (rdev->rio_mem)
		pci_iounmap(rdev->pdev, rdev->rio_mem);
	rdev->rio_mem = NULL;
	iounmap(rdev->rmmio);
	rdev->rmmio = NULL;
	if (rdev->family >= CHIP_BONAIRE)
		radeon_doorbell_fini(rdev);
	radeon_debugfs_remove_files(rdev);
}
1338
1339
1340/*
1341 * Suspend & resume.
1342 */
0c195119
AD
/**
 * radeon_suspend_kms - initiate device suspend
 *
 * @dev: drm dev pointer
 * @state: suspend state
 *
 * Puts the hw in the suspend state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */
int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
{
	struct radeon_device *rdev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	int i, r;
	bool force_completion = false;

	if (dev == NULL || dev->dev_private == NULL) {
		return -ENODEV;
	}
	if (state.event == PM_EVENT_PRETHAW) {
		return 0;
	}
	rdev = dev->dev_private;

	/* already powered down via vga_switcheroo; nothing to do */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	drm_kms_helper_poll_disable(dev);

	/* turn off display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
	}

	/* unpin the front buffers */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
		struct radeon_bo *robj;

		if (rfb == NULL || rfb->obj == NULL) {
			continue;
		}
		robj = gem_to_radeon_bo(rfb->obj);
		/* don't unpin kernel fb objects */
		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				radeon_bo_unpin(robj);
				radeon_bo_unreserve(robj);
			}
		}
	}
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);

	mutex_lock(&rdev->ring_lock);
	/* wait for gpu to finish processing current batch */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		r = radeon_fence_wait_empty_locked(rdev, i);
		if (r) {
			/* delay GPU reset to resume */
			force_completion = true;
		}
	}
	if (force_completion) {
		/* a wait failed: signal all fences so nothing stays stuck */
		radeon_fence_driver_force_completion(rdev);
	}
	mutex_unlock(&rdev->ring_lock);

	radeon_save_bios_scratch_regs(rdev);

	radeon_pm_suspend(rdev);
	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);
	/* evict remaining vram memory */
	radeon_bo_evict_vram(rdev);

	radeon_agp_suspend(rdev);

	pci_save_state(dev->pdev);
	if (state.event == PM_EVENT_SUSPEND) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}
	/* console_lock serializes against fbcon while we park the fbdev */
	console_lock();
	radeon_fbdev_set_suspend(rdev, 1);
	console_unlock();
	return 0;
}
1435
0c195119
AD
/**
 * radeon_resume_kms - initiate device resume
 *
 * @dev: drm dev pointer
 *
 * Bring the hw back to operating state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver resume.
 */
int radeon_resume_kms(struct drm_device *dev)
{
	struct drm_connector *connector;
	struct radeon_device *rdev = dev->dev_private;
	int r;

	/* powered down via vga_switcheroo; nothing to resume */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	console_lock();
	pci_set_power_state(dev->pdev, PCI_D0);
	pci_restore_state(dev->pdev);
	if (pci_enable_device(dev->pdev)) {
		console_unlock();
		return -1;
	}
	/* resume AGP if in use */
	radeon_agp_resume(rdev);
	radeon_resume(rdev);

	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	radeon_pm_resume(rdev);
	radeon_restore_bios_scratch_regs(rdev);

	radeon_fbdev_set_suspend(rdev, 0);
	console_unlock();

	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);
		/* turn on the BL */
		if (rdev->mode_info.bl_encoder) {
			u8 bl_level = radeon_get_backlight_level(rdev,
								 rdev->mode_info.bl_encoder);
			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
						   bl_level);
		}
	}
	/* reset hpd state */
	radeon_hpd_init(rdev);
	/* blat the mode back in */
	drm_helper_resume_force_mode(dev);
	/* turn on display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
	}

	drm_kms_helper_poll_enable(dev);
	return 0;
}
1499
0c195119
AD
/**
 * radeon_gpu_reset - reset the asic
 *
 * @rdev: radeon device pointer
 *
 * Attempt the reset the GPU if it has hung (all asics).
 * Unprocessed ring contents are saved before the reset and
 * replayed afterwards; on a failed replay the reset is retried
 * once without the saved commands.
 * Returns 0 for success or an error on failure.
 */
int radeon_gpu_reset(struct radeon_device *rdev)
{
	unsigned ring_sizes[RADEON_NUM_RINGS];
	uint32_t *ring_data[RADEON_NUM_RINGS];

	bool saved = false;

	int i, r;
	int resched;

	down_write(&rdev->exclusive_lock);
	radeon_save_bios_scratch_regs(rdev);
	/* block TTM */
	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
	radeon_suspend(rdev);

	/* back up the unprocessed commands from every ring */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
						  &ring_data[i]);
		if (ring_sizes[i]) {
			saved = true;
			dev_info(rdev->dev, "Saved %d dwords of commands "
				 "on ring %d.\n", ring_sizes[i], i);
		}
	}

retry:
	r = radeon_asic_reset(rdev);
	if (!r) {
		dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
		radeon_resume(rdev);
	}

	radeon_restore_bios_scratch_regs(rdev);

	if (!r) {
		/* replay the saved commands; restore consumes ring_data */
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			radeon_ring_restore(rdev, &rdev->ring[i],
					    ring_sizes[i], ring_data[i]);
			ring_sizes[i] = 0;
			ring_data[i] = NULL;
		}

		r = radeon_ib_ring_tests(rdev);
		if (r) {
			dev_err(rdev->dev, "ib ring test failed (%d).\n", r);
			if (saved) {
				/* retry the reset once, this time without
				 * re-submitting the saved commands */
				saved = false;
				radeon_suspend(rdev);
				goto retry;
			}
		}
	} else {
		/* reset failed: complete fences and drop the backups */
		radeon_fence_driver_force_completion(rdev);
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			kfree(ring_data[i]);
		}
	}

	drm_helper_resume_force_mode(rdev->ddev);

	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
	if (r) {
		/* bad news, how to tell it to userspace ? */
		dev_info(rdev->dev, "GPU reset failed\n");
	}

	up_write(&rdev->exclusive_lock);
	return r;
}
1578
771fe6b9
JG
1579
1580/*
1581 * Debugfs
1582 */
771fe6b9
JG
1583int radeon_debugfs_add_files(struct radeon_device *rdev,
1584 struct drm_info_list *files,
1585 unsigned nfiles)
1586{
1587 unsigned i;
1588
4d8bf9ae
CK
1589 for (i = 0; i < rdev->debugfs_count; i++) {
1590 if (rdev->debugfs[i].files == files) {
771fe6b9
JG
1591 /* Already registered */
1592 return 0;
1593 }
1594 }
c245cb9e 1595
4d8bf9ae 1596 i = rdev->debugfs_count + 1;
c245cb9e
MW
1597 if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
1598 DRM_ERROR("Reached maximum number of debugfs components.\n");
1599 DRM_ERROR("Report so we increase "
1600 "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
771fe6b9
JG
1601 return -EINVAL;
1602 }
4d8bf9ae
CK
1603 rdev->debugfs[rdev->debugfs_count].files = files;
1604 rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
1605 rdev->debugfs_count = i;
771fe6b9
JG
1606#if defined(CONFIG_DEBUG_FS)
1607 drm_debugfs_create_files(files, nfiles,
1608 rdev->ddev->control->debugfs_root,
1609 rdev->ddev->control);
1610 drm_debugfs_create_files(files, nfiles,
1611 rdev->ddev->primary->debugfs_root,
1612 rdev->ddev->primary);
1613#endif
1614 return 0;
1615}
1616
4d8bf9ae
CK
/**
 * radeon_debugfs_remove_files - unregister all debugfs file sets
 *
 * @rdev: radeon_device pointer
 *
 * Removes every file set previously added with
 * radeon_debugfs_add_files() from both drm minors.
 */
static void radeon_debugfs_remove_files(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < rdev->debugfs_count; i++) {
		struct drm_info_list *files = rdev->debugfs[i].files;
		unsigned num = rdev->debugfs[i].num_files;

		/* each set was created on both minors; remove from both */
		drm_debugfs_remove_files(files, num, rdev->ddev->control);
		drm_debugfs_remove_files(files, num, rdev->ddev->primary);
	}
#endif
}
1632
771fe6b9
JG
#if defined(CONFIG_DEBUG_FS)
/* drm core debugfs hooks: per-component files are registered
 * on demand via radeon_debugfs_add_files(), so the global
 * init/cleanup callbacks have nothing to do. */
int radeon_debugfs_init(struct drm_minor *minor)
{
	return 0;
}

void radeon_debugfs_cleanup(struct drm_minor *minor)
{
}
#endif
This page took 0.33043 seconds and 5 git commands to generate.