drm/radeon: fix-up some float to fixed conversion thinkos
[deliverable/linux.git] / drivers / gpu / drm / radeon / radeon_device.c
CommitLineData
771fe6b9
JG
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include <linux/console.h>
5a0e3ad6 29#include <linux/slab.h>
771fe6b9
JG
30#include <drm/drmP.h>
31#include <drm/drm_crtc_helper.h>
32#include <drm/radeon_drm.h>
28d52043 33#include <linux/vgaarb.h>
6a9ee8af 34#include <linux/vga_switcheroo.h>
bcc65fd8 35#include <linux/efi.h>
771fe6b9
JG
36#include "radeon_reg.h"
37#include "radeon.h"
771fe6b9
JG
38#include "atom.h"
39
1b5331d9
JG
/*
 * Human-readable names for each radeon ASIC family, indexed by
 * enum radeon_family.  Used for informational log output only.
 * The trailing "LAST" entry marks the end of the table.
 */
static const char radeon_family_name[][16] = {
	"R100",    "RV100",   "RS100",   "RV200",   "RS200",
	"R200",    "RV250",   "RS300",   "RV280",   "R300",
	"R350",    "RV350",   "RV380",   "R420",    "R423",
	"RV410",   "RS400",   "RS480",   "RS600",   "RS690",
	"RS740",   "RV515",   "R520",    "RV530",   "RV560",
	"RV570",   "R580",    "R600",    "RV610",   "RV630",
	"RV670",   "RV620",   "RV635",   "RS780",   "RS880",
	"RV770",   "RV730",   "RV710",   "RV740",   "CEDAR",
	"REDWOOD", "JUNIPER", "CYPRESS", "HEMLOCK", "PALM",
	"SUMO",    "SUMO2",   "BARTS",   "TURKS",   "CAICOS",
	"CAYMAN",  "ARUBA",   "TAHITI",  "PITCAIRN", "VERDE",
	"OLAND",   "HAINAN",  "BONAIRE", "KAVERI",  "KABINI",
	"LAST",
};
103
10ebc0bc
DA
/*
 * radeon_is_px() - whether this GPU is part of a switchable-graphics
 * setup (presumably PowerXpress — TODO confirm).  The real
 * implementation is provided elsewhere when CONFIG_VGA_SWITCHEROO is
 * enabled; otherwise it is stubbed out to always return false.
 */
104#if defined(CONFIG_VGA_SWITCHEROO)
105bool radeon_is_px(void);
106#else
107static inline bool radeon_is_px(void) { return false; }
108#endif
109
2e1b65f9
AD
110/**
111 * radeon_program_register_sequence - program an array of registers.
112 *
113 * @rdev: radeon_device pointer
114 * @registers: pointer to the register array
115 * @array_size: size of the register array
116 *
117 * Programs an array or registers with and and or masks.
118 * This is a helper for setting golden registers.
119 */
120void radeon_program_register_sequence(struct radeon_device *rdev,
121 const u32 *registers,
122 const u32 array_size)
123{
124 u32 tmp, reg, and_mask, or_mask;
125 int i;
126
127 if (array_size % 3)
128 return;
129
130 for (i = 0; i < array_size; i +=3) {
131 reg = registers[i + 0];
132 and_mask = registers[i + 1];
133 or_mask = registers[i + 2];
134
135 if (and_mask == 0xffffffff) {
136 tmp = or_mask;
137 } else {
138 tmp = RREG32(reg);
139 tmp &= ~and_mask;
140 tmp |= or_mask;
141 }
142 WREG32(reg, tmp);
143 }
144}
145
0c195119
AD
146/**
147 * radeon_surface_init - Clear GPU surface registers.
148 *
149 * @rdev: radeon_device pointer
150 *
151 * Clear GPU surface registers (r1xx-r5xx).
b1e3a6d1 152 */
3ce0a23d 153void radeon_surface_init(struct radeon_device *rdev)
b1e3a6d1
MD
154{
155 /* FIXME: check this out */
156 if (rdev->family < CHIP_R600) {
157 int i;
158
550e2d92
DA
159 for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
160 if (rdev->surface_regs[i].bo)
161 radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
162 else
163 radeon_clear_surface_reg(rdev, i);
b1e3a6d1 164 }
e024e110
DA
165 /* enable surfaces */
166 WREG32(RADEON_SURFACE_CNTL, 0);
b1e3a6d1
MD
167 }
168}
169
771fe6b9
JG
170/*
171 * GPU scratch registers helpers function.
172 */
0c195119
AD
173/**
174 * radeon_scratch_init - Init scratch register driver information.
175 *
176 * @rdev: radeon_device pointer
177 *
178 * Init CP scratch register driver information (r1xx-r5xx)
179 */
3ce0a23d 180void radeon_scratch_init(struct radeon_device *rdev)
771fe6b9
JG
181{
182 int i;
183
184 /* FIXME: check this out */
185 if (rdev->family < CHIP_R300) {
186 rdev->scratch.num_reg = 5;
187 } else {
188 rdev->scratch.num_reg = 7;
189 }
724c80e1 190 rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
771fe6b9
JG
191 for (i = 0; i < rdev->scratch.num_reg; i++) {
192 rdev->scratch.free[i] = true;
724c80e1 193 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
771fe6b9
JG
194 }
195}
196
0c195119
AD
197/**
198 * radeon_scratch_get - Allocate a scratch register
199 *
200 * @rdev: radeon_device pointer
201 * @reg: scratch register mmio offset
202 *
203 * Allocate a CP scratch register for use by the driver (all asics).
204 * Returns 0 on success or -EINVAL on failure.
205 */
771fe6b9
JG
206int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
207{
208 int i;
209
210 for (i = 0; i < rdev->scratch.num_reg; i++) {
211 if (rdev->scratch.free[i]) {
212 rdev->scratch.free[i] = false;
213 *reg = rdev->scratch.reg[i];
214 return 0;
215 }
216 }
217 return -EINVAL;
218}
219
0c195119
AD
220/**
221 * radeon_scratch_free - Free a scratch register
222 *
223 * @rdev: radeon_device pointer
224 * @reg: scratch register mmio offset
225 *
226 * Free a CP scratch register allocated for use by the driver (all asics)
227 */
771fe6b9
JG
228void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
229{
230 int i;
231
232 for (i = 0; i < rdev->scratch.num_reg; i++) {
233 if (rdev->scratch.reg[i] == reg) {
234 rdev->scratch.free[i] = true;
235 return;
236 }
237 }
238}
239
75efdee1
AD
240/*
241 * GPU doorbell aperture helpers function.
242 */
243/**
244 * radeon_doorbell_init - Init doorbell driver information.
245 *
246 * @rdev: radeon_device pointer
247 *
248 * Init doorbell driver information (CIK)
249 * Returns 0 on success, error on failure.
250 */
251int radeon_doorbell_init(struct radeon_device *rdev)
252{
253 int i;
254
255 /* doorbell bar mapping */
256 rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
257 rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);
258
259 /* limit to 4 MB for now */
260 if (rdev->doorbell.size > (4 * 1024 * 1024))
261 rdev->doorbell.size = 4 * 1024 * 1024;
262
263 rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.size);
264 if (rdev->doorbell.ptr == NULL) {
265 return -ENOMEM;
266 }
267 DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
268 DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);
269
270 rdev->doorbell.num_pages = rdev->doorbell.size / PAGE_SIZE;
271
272 for (i = 0; i < rdev->doorbell.num_pages; i++) {
273 rdev->doorbell.free[i] = true;
274 }
275 return 0;
276}
277
278/**
279 * radeon_doorbell_fini - Tear down doorbell driver information.
280 *
281 * @rdev: radeon_device pointer
282 *
283 * Tear down doorbell driver information (CIK)
284 */
285void radeon_doorbell_fini(struct radeon_device *rdev)
286{
287 iounmap(rdev->doorbell.ptr);
288 rdev->doorbell.ptr = NULL;
289}
290
291/**
292 * radeon_doorbell_get - Allocate a doorbell page
293 *
294 * @rdev: radeon_device pointer
295 * @doorbell: doorbell page number
296 *
297 * Allocate a doorbell page for use by the driver (all asics).
298 * Returns 0 on success or -EINVAL on failure.
299 */
300int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
301{
302 int i;
303
304 for (i = 0; i < rdev->doorbell.num_pages; i++) {
305 if (rdev->doorbell.free[i]) {
306 rdev->doorbell.free[i] = false;
307 *doorbell = i;
308 return 0;
309 }
310 }
311 return -EINVAL;
312}
313
314/**
315 * radeon_doorbell_free - Free a doorbell page
316 *
317 * @rdev: radeon_device pointer
318 * @doorbell: doorbell page number
319 *
320 * Free a doorbell page allocated for use by the driver (all asics)
321 */
322void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
323{
324 if (doorbell < rdev->doorbell.num_pages)
325 rdev->doorbell.free[doorbell] = true;
326}
327
0c195119
AD
328/*
329 * radeon_wb_*()
330 * Writeback is the the method by which the the GPU updates special pages
331 * in memory with the status of certain GPU events (fences, ring pointers,
332 * etc.).
333 */
334
335/**
336 * radeon_wb_disable - Disable Writeback
337 *
338 * @rdev: radeon_device pointer
339 *
340 * Disables Writeback (all asics). Used for suspend.
341 */
724c80e1
AD
342void radeon_wb_disable(struct radeon_device *rdev)
343{
724c80e1
AD
344 rdev->wb.enabled = false;
345}
346
0c195119
AD
347/**
348 * radeon_wb_fini - Disable Writeback and free memory
349 *
350 * @rdev: radeon_device pointer
351 *
352 * Disables Writeback and frees the Writeback memory (all asics).
353 * Used at driver shutdown.
354 */
724c80e1
AD
355void radeon_wb_fini(struct radeon_device *rdev)
356{
357 radeon_wb_disable(rdev);
358 if (rdev->wb.wb_obj) {
089920f2
JG
359 if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
360 radeon_bo_kunmap(rdev->wb.wb_obj);
361 radeon_bo_unpin(rdev->wb.wb_obj);
362 radeon_bo_unreserve(rdev->wb.wb_obj);
363 }
724c80e1
AD
364 radeon_bo_unref(&rdev->wb.wb_obj);
365 rdev->wb.wb = NULL;
366 rdev->wb.wb_obj = NULL;
367 }
368}
369
0c195119
AD
370/**
371 * radeon_wb_init- Init Writeback driver info and allocate memory
372 *
373 * @rdev: radeon_device pointer
374 *
375 * Disables Writeback and frees the Writeback memory (all asics).
376 * Used at driver startup.
377 * Returns 0 on success or an -error on failure.
378 */
724c80e1
AD
379int radeon_wb_init(struct radeon_device *rdev)
380{
381 int r;
382
383 if (rdev->wb.wb_obj == NULL) {
441921d5 384 r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
40f5cf99 385 RADEON_GEM_DOMAIN_GTT, NULL, &rdev->wb.wb_obj);
724c80e1
AD
386 if (r) {
387 dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
388 return r;
389 }
089920f2
JG
390 r = radeon_bo_reserve(rdev->wb.wb_obj, false);
391 if (unlikely(r != 0)) {
392 radeon_wb_fini(rdev);
393 return r;
394 }
395 r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
396 &rdev->wb.gpu_addr);
397 if (r) {
398 radeon_bo_unreserve(rdev->wb.wb_obj);
399 dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
400 radeon_wb_fini(rdev);
401 return r;
402 }
403 r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
724c80e1 404 radeon_bo_unreserve(rdev->wb.wb_obj);
089920f2
JG
405 if (r) {
406 dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
407 radeon_wb_fini(rdev);
408 return r;
409 }
724c80e1
AD
410 }
411
e6ba7599
AD
412 /* clear wb memory */
413 memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
d0f8a854
AD
414 /* disable event_write fences */
415 rdev->wb.use_event = false;
724c80e1 416 /* disabled via module param */
3b7a2b24 417 if (radeon_no_wb == 1) {
724c80e1 418 rdev->wb.enabled = false;
3b7a2b24 419 } else {
724c80e1 420 if (rdev->flags & RADEON_IS_AGP) {
28eebb70
AD
421 /* often unreliable on AGP */
422 rdev->wb.enabled = false;
423 } else if (rdev->family < CHIP_R300) {
424 /* often unreliable on pre-r300 */
724c80e1 425 rdev->wb.enabled = false;
d0f8a854 426 } else {
724c80e1 427 rdev->wb.enabled = true;
d0f8a854 428 /* event_write fences are only available on r600+ */
3b7a2b24 429 if (rdev->family >= CHIP_R600) {
d0f8a854 430 rdev->wb.use_event = true;
3b7a2b24 431 }
d0f8a854 432 }
724c80e1 433 }
c994ead6
AD
434 /* always use writeback/events on NI, APUs */
435 if (rdev->family >= CHIP_PALM) {
7d52785d
AD
436 rdev->wb.enabled = true;
437 rdev->wb.use_event = true;
438 }
724c80e1
AD
439
440 dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");
441
442 return 0;
443}
444
d594e46a
JG
445/**
446 * radeon_vram_location - try to find VRAM location
447 * @rdev: radeon device structure holding all necessary informations
448 * @mc: memory controller structure holding memory informations
449 * @base: base address at which to put VRAM
450 *
451 * Function will place try to place VRAM at base address provided
452 * as parameter (which is so far either PCI aperture address or
453 * for IGP TOM base address).
454 *
455 * If there is not enough space to fit the unvisible VRAM in the 32bits
456 * address space then we limit the VRAM size to the aperture.
457 *
458 * If we are using AGP and if the AGP aperture doesn't allow us to have
459 * room for all the VRAM than we restrict the VRAM to the PCI aperture
460 * size and print a warning.
461 *
462 * This function will never fails, worst case are limiting VRAM.
463 *
464 * Note: GTT start, end, size should be initialized before calling this
465 * function on AGP platform.
466 *
25985edc 467 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
d594e46a
JG
468 * this shouldn't be a problem as we are using the PCI aperture as a reference.
469 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
470 * not IGP.
471 *
472 * Note: we use mc_vram_size as on some board we need to program the mc to
473 * cover the whole aperture even if VRAM size is inferior to aperture size
474 * Novell bug 204882 + along with lots of ubuntu ones
475 *
476 * Note: when limiting vram it's safe to overwritte real_vram_size because
477 * we are not in case where real_vram_size is inferior to mc_vram_size (ie
478 * note afected by bogus hw of Novell bug 204882 + along with lots of ubuntu
479 * ones)
480 *
481 * Note: IGP TOM addr should be the same as the aperture addr, we don't
482 * explicitly check for that thought.
483 *
484 * FIXME: when reducing VRAM size align new size on power of 2.
771fe6b9 485 */
d594e46a 486void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
771fe6b9 487{
1bcb04f7
CK
488 uint64_t limit = (uint64_t)radeon_vram_limit << 20;
489
d594e46a 490 mc->vram_start = base;
9ed8b1f9 491 if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
d594e46a
JG
492 dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
493 mc->real_vram_size = mc->aper_size;
494 mc->mc_vram_size = mc->aper_size;
495 }
496 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
2cbeb4ef 497 if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
d594e46a
JG
498 dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
499 mc->real_vram_size = mc->aper_size;
500 mc->mc_vram_size = mc->aper_size;
501 }
502 mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
1bcb04f7
CK
503 if (limit && limit < mc->real_vram_size)
504 mc->real_vram_size = limit;
dd7cc55a 505 dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
d594e46a
JG
506 mc->mc_vram_size >> 20, mc->vram_start,
507 mc->vram_end, mc->real_vram_size >> 20);
508}
771fe6b9 509
d594e46a
JG
510/**
511 * radeon_gtt_location - try to find GTT location
512 * @rdev: radeon device structure holding all necessary informations
513 * @mc: memory controller structure holding memory informations
514 *
515 * Function will place try to place GTT before or after VRAM.
516 *
517 * If GTT size is bigger than space left then we ajust GTT size.
518 * Thus function will never fails.
519 *
520 * FIXME: when reducing GTT size align new size on power of 2.
521 */
522void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
523{
524 u64 size_af, size_bf;
525
9ed8b1f9 526 size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
8d369bb1 527 size_bf = mc->vram_start & ~mc->gtt_base_align;
d594e46a
JG
528 if (size_bf > size_af) {
529 if (mc->gtt_size > size_bf) {
530 dev_warn(rdev->dev, "limiting GTT\n");
531 mc->gtt_size = size_bf;
771fe6b9 532 }
8d369bb1 533 mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
771fe6b9 534 } else {
d594e46a
JG
535 if (mc->gtt_size > size_af) {
536 dev_warn(rdev->dev, "limiting GTT\n");
537 mc->gtt_size = size_af;
538 }
8d369bb1 539 mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
771fe6b9 540 }
d594e46a 541 mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
dd7cc55a 542 dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
d594e46a 543 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
771fe6b9
JG
544}
545
771fe6b9
JG
546/*
547 * GPU helpers function.
548 */
0c195119
AD
549/**
550 * radeon_card_posted - check if the hw has already been initialized
551 *
552 * @rdev: radeon_device pointer
553 *
554 * Check if the asic has been initialized (all asics).
555 * Used at driver startup.
556 * Returns true if initialized or false if not.
557 */
9f022ddf 558bool radeon_card_posted(struct radeon_device *rdev)
771fe6b9
JG
559{
560 uint32_t reg;
561
50a583f6 562 /* required for EFI mode on macbook2,1 which uses an r5xx asic */
83e68189 563 if (efi_enabled(EFI_BOOT) &&
50a583f6
AD
564 (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
565 (rdev->family < CHIP_R600))
bcc65fd8
MG
566 return false;
567
2cf3a4fc
AD
568 if (ASIC_IS_NODCE(rdev))
569 goto check_memsize;
570
771fe6b9 571 /* first check CRTCs */
09fb8bd1 572 if (ASIC_IS_DCE4(rdev)) {
18007401
AD
573 reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
574 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
09fb8bd1
AD
575 if (rdev->num_crtc >= 4) {
576 reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
577 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
578 }
579 if (rdev->num_crtc >= 6) {
580 reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
581 RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
582 }
bcc1c2a1
AD
583 if (reg & EVERGREEN_CRTC_MASTER_EN)
584 return true;
585 } else if (ASIC_IS_AVIVO(rdev)) {
771fe6b9
JG
586 reg = RREG32(AVIVO_D1CRTC_CONTROL) |
587 RREG32(AVIVO_D2CRTC_CONTROL);
588 if (reg & AVIVO_CRTC_EN) {
589 return true;
590 }
591 } else {
592 reg = RREG32(RADEON_CRTC_GEN_CNTL) |
593 RREG32(RADEON_CRTC2_GEN_CNTL);
594 if (reg & RADEON_CRTC_EN) {
595 return true;
596 }
597 }
598
2cf3a4fc 599check_memsize:
771fe6b9
JG
600 /* then check MEM_SIZE, in case the crtcs are off */
601 if (rdev->family >= CHIP_R600)
602 reg = RREG32(R600_CONFIG_MEMSIZE);
603 else
604 reg = RREG32(RADEON_CONFIG_MEMSIZE);
605
606 if (reg)
607 return true;
608
609 return false;
610
611}
612
0c195119
AD
613/**
614 * radeon_update_bandwidth_info - update display bandwidth params
615 *
616 * @rdev: radeon_device pointer
617 *
618 * Used when sclk/mclk are switched or display modes are set.
619 * params are used to calculate display watermarks (all asics)
620 */
f47299c5
AD
621void radeon_update_bandwidth_info(struct radeon_device *rdev)
622{
623 fixed20_12 a;
8807286e
AD
624 u32 sclk = rdev->pm.current_sclk;
625 u32 mclk = rdev->pm.current_mclk;
f47299c5 626
8807286e
AD
627 /* sclk/mclk in Mhz */
628 a.full = dfixed_const(100);
629 rdev->pm.sclk.full = dfixed_const(sclk);
630 rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
631 rdev->pm.mclk.full = dfixed_const(mclk);
632 rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
f47299c5 633
8807286e 634 if (rdev->flags & RADEON_IS_IGP) {
68adac5e 635 a.full = dfixed_const(16);
f47299c5 636 /* core_bandwidth = sclk(Mhz) * 16 */
68adac5e 637 rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
f47299c5
AD
638 }
639}
640
0c195119
AD
641/**
642 * radeon_boot_test_post_card - check and possibly initialize the hw
643 *
644 * @rdev: radeon_device pointer
645 *
646 * Check if the asic is initialized and if not, attempt to initialize
647 * it (all asics).
648 * Returns true if initialized or false if not.
649 */
72542d77
DA
650bool radeon_boot_test_post_card(struct radeon_device *rdev)
651{
652 if (radeon_card_posted(rdev))
653 return true;
654
655 if (rdev->bios) {
656 DRM_INFO("GPU not posted. posting now...\n");
657 if (rdev->is_atom_bios)
658 atom_asic_init(rdev->mode_info.atom_context);
659 else
660 radeon_combios_asic_init(rdev->ddev);
661 return true;
662 } else {
663 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
664 return false;
665 }
666}
667
0c195119
AD
668/**
669 * radeon_dummy_page_init - init dummy page used by the driver
670 *
671 * @rdev: radeon_device pointer
672 *
673 * Allocate the dummy page used by the driver (all asics).
674 * This dummy page is used by the driver as a filler for gart entries
675 * when pages are taken out of the GART
676 * Returns 0 on sucess, -ENOMEM on failure.
677 */
3ce0a23d
JG
678int radeon_dummy_page_init(struct radeon_device *rdev)
679{
82568565
DA
680 if (rdev->dummy_page.page)
681 return 0;
3ce0a23d
JG
682 rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
683 if (rdev->dummy_page.page == NULL)
684 return -ENOMEM;
685 rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
686 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
a30f6fb7
BH
687 if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
688 dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
3ce0a23d
JG
689 __free_page(rdev->dummy_page.page);
690 rdev->dummy_page.page = NULL;
691 return -ENOMEM;
692 }
693 return 0;
694}
695
0c195119
AD
696/**
697 * radeon_dummy_page_fini - free dummy page used by the driver
698 *
699 * @rdev: radeon_device pointer
700 *
701 * Frees the dummy page used by the driver (all asics).
702 */
3ce0a23d
JG
703void radeon_dummy_page_fini(struct radeon_device *rdev)
704{
705 if (rdev->dummy_page.page == NULL)
706 return;
707 pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
708 PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
709 __free_page(rdev->dummy_page.page);
710 rdev->dummy_page.page = NULL;
711}
712
771fe6b9 713
771fe6b9 714/* ATOM accessor methods */
0c195119
AD
715/*
716 * ATOM is an interpreted byte code stored in tables in the vbios. The
717 * driver registers callbacks to access registers and the interpreter
718 * in the driver parses the tables and executes then to program specific
719 * actions (set display modes, asic init, etc.). See radeon_atombios.c,
720 * atombios.h, and atom.c
721 */
722
723/**
724 * cail_pll_read - read PLL register
725 *
726 * @info: atom card_info pointer
727 * @reg: PLL register offset
728 *
729 * Provides a PLL register accessor for the atom interpreter (r4xx+).
730 * Returns the value of the PLL register.
731 */
771fe6b9
JG
732static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
733{
734 struct radeon_device *rdev = info->dev->dev_private;
735 uint32_t r;
736
737 r = rdev->pll_rreg(rdev, reg);
738 return r;
739}
740
0c195119
AD
741/**
742 * cail_pll_write - write PLL register
743 *
744 * @info: atom card_info pointer
745 * @reg: PLL register offset
746 * @val: value to write to the pll register
747 *
748 * Provides a PLL register accessor for the atom interpreter (r4xx+).
749 */
771fe6b9
JG
750static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
751{
752 struct radeon_device *rdev = info->dev->dev_private;
753
754 rdev->pll_wreg(rdev, reg, val);
755}
756
0c195119
AD
757/**
758 * cail_mc_read - read MC (Memory Controller) register
759 *
760 * @info: atom card_info pointer
761 * @reg: MC register offset
762 *
763 * Provides an MC register accessor for the atom interpreter (r4xx+).
764 * Returns the value of the MC register.
765 */
771fe6b9
JG
766static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
767{
768 struct radeon_device *rdev = info->dev->dev_private;
769 uint32_t r;
770
771 r = rdev->mc_rreg(rdev, reg);
772 return r;
773}
774
0c195119
AD
775/**
776 * cail_mc_write - write MC (Memory Controller) register
777 *
778 * @info: atom card_info pointer
779 * @reg: MC register offset
780 * @val: value to write to the pll register
781 *
782 * Provides a MC register accessor for the atom interpreter (r4xx+).
783 */
771fe6b9
JG
784static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
785{
786 struct radeon_device *rdev = info->dev->dev_private;
787
788 rdev->mc_wreg(rdev, reg, val);
789}
790
0c195119
AD
791/**
792 * cail_reg_write - write MMIO register
793 *
794 * @info: atom card_info pointer
795 * @reg: MMIO register offset
796 * @val: value to write to the pll register
797 *
798 * Provides a MMIO register accessor for the atom interpreter (r4xx+).
799 */
771fe6b9
JG
800static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
801{
802 struct radeon_device *rdev = info->dev->dev_private;
803
804 WREG32(reg*4, val);
805}
806
0c195119
AD
807/**
808 * cail_reg_read - read MMIO register
809 *
810 * @info: atom card_info pointer
811 * @reg: MMIO register offset
812 *
813 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
814 * Returns the value of the MMIO register.
815 */
771fe6b9
JG
816static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
817{
818 struct radeon_device *rdev = info->dev->dev_private;
819 uint32_t r;
820
821 r = RREG32(reg*4);
822 return r;
823}
824
0c195119
AD
825/**
826 * cail_ioreg_write - write IO register
827 *
828 * @info: atom card_info pointer
829 * @reg: IO register offset
830 * @val: value to write to the pll register
831 *
832 * Provides a IO register accessor for the atom interpreter (r4xx+).
833 */
351a52a2
AD
834static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
835{
836 struct radeon_device *rdev = info->dev->dev_private;
837
838 WREG32_IO(reg*4, val);
839}
840
0c195119
AD
841/**
842 * cail_ioreg_read - read IO register
843 *
844 * @info: atom card_info pointer
845 * @reg: IO register offset
846 *
847 * Provides an IO register accessor for the atom interpreter (r4xx+).
848 * Returns the value of the IO register.
849 */
351a52a2
AD
850static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
851{
852 struct radeon_device *rdev = info->dev->dev_private;
853 uint32_t r;
854
855 r = RREG32_IO(reg*4);
856 return r;
857}
858
0c195119
AD
859/**
860 * radeon_atombios_init - init the driver info and callbacks for atombios
861 *
862 * @rdev: radeon_device pointer
863 *
864 * Initializes the driver info and register access callbacks for the
865 * ATOM interpreter (r4xx+).
866 * Returns 0 on sucess, -ENOMEM on failure.
867 * Called at driver startup.
868 */
771fe6b9
JG
869int radeon_atombios_init(struct radeon_device *rdev)
870{
61c4b24b
MF
871 struct card_info *atom_card_info =
872 kzalloc(sizeof(struct card_info), GFP_KERNEL);
873
874 if (!atom_card_info)
875 return -ENOMEM;
876
877 rdev->mode_info.atom_card_info = atom_card_info;
878 atom_card_info->dev = rdev->ddev;
879 atom_card_info->reg_read = cail_reg_read;
880 atom_card_info->reg_write = cail_reg_write;
351a52a2
AD
881 /* needed for iio ops */
882 if (rdev->rio_mem) {
883 atom_card_info->ioreg_read = cail_ioreg_read;
884 atom_card_info->ioreg_write = cail_ioreg_write;
885 } else {
886 DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
887 atom_card_info->ioreg_read = cail_reg_read;
888 atom_card_info->ioreg_write = cail_reg_write;
889 }
61c4b24b
MF
890 atom_card_info->mc_read = cail_mc_read;
891 atom_card_info->mc_write = cail_mc_write;
892 atom_card_info->pll_read = cail_pll_read;
893 atom_card_info->pll_write = cail_pll_write;
894
895 rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
0e34d094
TG
896 if (!rdev->mode_info.atom_context) {
897 radeon_atombios_fini(rdev);
898 return -ENOMEM;
899 }
900
c31ad97f 901 mutex_init(&rdev->mode_info.atom_context->mutex);
771fe6b9 902 radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
d904ef9b 903 atom_allocate_fb_scratch(rdev->mode_info.atom_context);
771fe6b9
JG
904 return 0;
905}
906
0c195119
AD
907/**
908 * radeon_atombios_fini - free the driver info and callbacks for atombios
909 *
910 * @rdev: radeon_device pointer
911 *
912 * Frees the driver info and register access callbacks for the ATOM
913 * interpreter (r4xx+).
914 * Called at driver shutdown.
915 */
771fe6b9
JG
916void radeon_atombios_fini(struct radeon_device *rdev)
917{
4a04a844
JG
918 if (rdev->mode_info.atom_context) {
919 kfree(rdev->mode_info.atom_context->scratch);
4a04a844 920 }
0e34d094
TG
921 kfree(rdev->mode_info.atom_context);
922 rdev->mode_info.atom_context = NULL;
61c4b24b 923 kfree(rdev->mode_info.atom_card_info);
0e34d094 924 rdev->mode_info.atom_card_info = NULL;
771fe6b9
JG
925}
926
0c195119
AD
927/* COMBIOS */
928/*
929 * COMBIOS is the bios format prior to ATOM. It provides
930 * command tables similar to ATOM, but doesn't have a unified
931 * parser. See radeon_combios.c
932 */
933
934/**
935 * radeon_combios_init - init the driver info for combios
936 *
937 * @rdev: radeon_device pointer
938 *
939 * Initializes the driver info for combios (r1xx-r3xx).
940 * Returns 0 on sucess.
941 * Called at driver startup.
942 */
771fe6b9
JG
943int radeon_combios_init(struct radeon_device *rdev)
944{
945 radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
946 return 0;
947}
948
0c195119
AD
/**
 * radeon_combios_fini - free the driver info for combios
 *
 * @rdev: radeon_device pointer
 *
 * Frees the driver info for combios (r1xx-r3xx).
 * Called at driver shutdown.
 */
void radeon_combios_fini(struct radeon_device *rdev)
{
	/* nothing to tear down for combios */
}
960
0c195119
AD
961/* if we get transitioned to only one device, take VGA back */
962/**
963 * radeon_vga_set_decode - enable/disable vga decode
964 *
965 * @cookie: radeon_device pointer
966 * @state: enable/disable vga decode
967 *
968 * Enable/disable vga decode (all asics).
969 * Returns VGA resource flags.
970 */
28d52043
DA
971static unsigned int radeon_vga_set_decode(void *cookie, bool state)
972{
973 struct radeon_device *rdev = cookie;
28d52043
DA
974 radeon_vga_set_state(rdev, state);
975 if (state)
976 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
977 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
978 else
979 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
980}
c1176d6f 981
1bcb04f7
CK
/**
 * radeon_check_pot_argument - check that argument is a power of two
 *
 * @arg: value to check
 *
 * Validates that a certain argument is a power of two (all asics).
 * Returns true if argument is valid.
 * Note: zero also passes this test (x & (x-1) == 0); callers appear to
 * use 0 as "parameter disabled" — confirm before tightening.
 */
static bool radeon_check_pot_argument(int arg)
{
	return !(arg & (arg - 1));
}
994
0c195119
AD
995/**
996 * radeon_check_arguments - validate module params
997 *
998 * @rdev: radeon_device pointer
999 *
1000 * Validates certain module parameters and updates
1001 * the associated values used by the driver (all asics).
1002 */
1109ca09 1003static void radeon_check_arguments(struct radeon_device *rdev)
36421338
JG
1004{
1005 /* vramlimit must be a power of two */
1bcb04f7 1006 if (!radeon_check_pot_argument(radeon_vram_limit)) {
36421338
JG
1007 dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
1008 radeon_vram_limit);
1009 radeon_vram_limit = 0;
36421338 1010 }
1bcb04f7 1011
edcd26e8
AD
1012 if (radeon_gart_size == -1) {
1013 /* default to a larger gart size on newer asics */
1014 if (rdev->family >= CHIP_RV770)
1015 radeon_gart_size = 1024;
1016 else
1017 radeon_gart_size = 512;
1018 }
36421338 1019 /* gtt size must be power of two and greater or equal to 32M */
1bcb04f7 1020 if (radeon_gart_size < 32) {
edcd26e8 1021 dev_warn(rdev->dev, "gart size (%d) too small\n",
36421338 1022 radeon_gart_size);
edcd26e8
AD
1023 if (rdev->family >= CHIP_RV770)
1024 radeon_gart_size = 1024;
1025 else
1026 radeon_gart_size = 512;
1bcb04f7 1027 } else if (!radeon_check_pot_argument(radeon_gart_size)) {
36421338
JG
1028 dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
1029 radeon_gart_size);
edcd26e8
AD
1030 if (rdev->family >= CHIP_RV770)
1031 radeon_gart_size = 1024;
1032 else
1033 radeon_gart_size = 512;
36421338 1034 }
1bcb04f7
CK
1035 rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
1036
36421338
JG
1037 /* AGP mode can only be -1, 1, 2, 4, 8 */
1038 switch (radeon_agpmode) {
1039 case -1:
1040 case 0:
1041 case 1:
1042 case 2:
1043 case 4:
1044 case 8:
1045 break;
1046 default:
1047 dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
1048 "-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
1049 radeon_agpmode = 0;
1050 break;
1051 }
1052}
1053
d1f9809e
ML
1054/**
1055 * radeon_switcheroo_quirk_long_wakeup - return true if longer d3 delay is
1056 * needed for waking up.
1057 *
1058 * @pdev: pci dev pointer
1059 */
1060static bool radeon_switcheroo_quirk_long_wakeup(struct pci_dev *pdev)
1061{
1062
1063 /* 6600m in a macbook pro */
1064 if (pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE &&
1065 pdev->subsystem_device == 0x00e2) {
1066 printk(KERN_INFO "radeon: quirking longer d3 wakeup delay\n");
1067 return true;
1068 }
1069
1070 return false;
1071}
1072
0c195119
AD
/**
 * radeon_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes the
 * asics before or after it is powered up using ACPI methods.
 */
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/* NOTE(review): on PX systems the OFF transition is skipped here —
	 * presumably handled elsewhere (runtime PM); confirm before changing */
	if (radeon_is_px() && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		/* remember the stock d3 delay so the quirk below can be undone */
		unsigned d3_delay = dev->pdev->d3_delay;

		printk(KERN_INFO "radeon: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		/* some systems (apple 6600m) need a longer wakeup delay */
		if (d3_delay < 20 && radeon_switcheroo_quirk_long_wakeup(pdev))
			dev->pdev->d3_delay = 20;

		radeon_resume_kms(dev, true, true);

		/* restore the original delay once resume is done */
		dev->pdev->d3_delay = d3_delay;

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		printk(KERN_INFO "radeon: switched off\n");
		/* stop output polling before the hw goes away */
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		radeon_suspend_kms(dev, true, true);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}
1113
0c195119
AD
1114/**
1115 * radeon_switcheroo_can_switch - see if switcheroo state can change
1116 *
1117 * @pdev: pci dev pointer
1118 *
1119 * Callback for the switcheroo driver. Check of the switcheroo
1120 * state can be changed.
1121 * Returns true if the state can be changed, false if not.
1122 */
6a9ee8af
DA
1123static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
1124{
1125 struct drm_device *dev = pci_get_drvdata(pdev);
1126 bool can_switch;
1127
1128 spin_lock(&dev->count_lock);
1129 can_switch = (dev->open_count == 0);
1130 spin_unlock(&dev->count_lock);
1131 return can_switch;
1132}
1133
26ec685f
TI
/* vga_switcheroo callbacks: power the GPU up/down around switches and
 * report whether switching is currently possible; reprobe unsupported. */
static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
	.set_gpu_state = radeon_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = radeon_switcheroo_can_switch,
};
6a9ee8af 1139
0c195119
AD
/**
 * radeon_device_init - initialize the driver
 *
 * @rdev: radeon_device pointer
 * @ddev: drm dev pointer
 * @pdev: pci dev pointer
 * @flags: driver flags
 *
 * Initializes the driver info and hw (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver startup.
 */
int radeon_device_init(struct radeon_device *rdev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r, i;
	int dma_bits;
	bool runtime = false;

	rdev->shutdown = false;
	rdev->dev = &pdev->dev;
	rdev->ddev = ddev;
	rdev->pdev = pdev;
	rdev->flags = flags;
	rdev->family = flags & RADEON_FAMILY_MASK;
	rdev->is_atom_bios = false;
	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
	/* provisional gtt size; radeon_check_arguments() below recomputes it
	 * from the radeon_gart_size module parameter */
	rdev->mc.gtt_size = 512 * 1024 * 1024;
	rdev->accel_working = false;
	/* set up ring ids */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		rdev->ring[i].idx = i;
	}

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
		radeon_family_name[rdev->family], pdev->vendor, pdev->device,
		pdev->subsystem_vendor, pdev->subsystem_device);

	/* mutex initialization are all done here so we
	 * can recall function without having locking issues */
	mutex_init(&rdev->ring_lock);
	mutex_init(&rdev->dc_hw_i2c_mutex);
	atomic_set(&rdev->ih.lock, 0);
	mutex_init(&rdev->gem.mutex);
	mutex_init(&rdev->pm.mutex);
	mutex_init(&rdev->gpu_clock_mutex);
	mutex_init(&rdev->srbm_mutex);
	init_rwsem(&rdev->pm.mclk_lock);
	init_rwsem(&rdev->exclusive_lock);
	init_waitqueue_head(&rdev->irq.vblank_queue);
	r = radeon_gem_init(rdev);
	if (r)
		return r;
	/* initialize vm here */
	mutex_init(&rdev->vm_manager.lock);
	/* Adjust VM size here.
	 * Currently set to 4GB ((1 << 20) 4k pages).
	 * Max GPUVM size for cayman and SI is 40 bits.
	 */
	rdev->vm_manager.max_pfn = 1 << 20;
	INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);

	/* Set asic functions */
	r = radeon_asic_init(rdev);
	if (r)
		return r;
	/* validate module parameters now that the family is known */
	radeon_check_arguments(rdev);

	/* all of the newer IGP chips have an internal gart
	 * However some rs4xx report as AGP, so remove that here.
	 */
	if ((rdev->family >= CHIP_RS400) &&
	    (rdev->flags & RADEON_IS_IGP)) {
		rdev->flags &= ~RADEON_IS_AGP;
	}

	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
		radeon_agp_disable(rdev);
	}

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	if (rdev->family >= CHIP_CAYMAN)
		rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
	else if (rdev->family >= CHIP_CEDAR)
		rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
	else
		rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits
	 * AGP - generally dma32 is safest
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
	 */
	rdev->need_dma32 = false;
	if (rdev->flags & RADEON_IS_AGP)
		rdev->need_dma32 = true;
	if ((rdev->flags & RADEON_IS_PCI) &&
	    (rdev->family <= CHIP_RS740))
		rdev->need_dma32 = true;

	dma_bits = rdev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		/* fall back to a 32-bit mask if 40 bits is not available */
		rdev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "radeon: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "radeon: No coherent DMA available.\n");
	}

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
	spin_lock_init(&rdev->mmio_idx_lock);
	spin_lock_init(&rdev->smc_idx_lock);
	spin_lock_init(&rdev->pll_idx_lock);
	spin_lock_init(&rdev->mc_idx_lock);
	spin_lock_init(&rdev->pcie_idx_lock);
	spin_lock_init(&rdev->pciep_idx_lock);
	spin_lock_init(&rdev->pif_idx_lock);
	spin_lock_init(&rdev->cg_idx_lock);
	spin_lock_init(&rdev->uvd_idx_lock);
	spin_lock_init(&rdev->rcu_idx_lock);
	spin_lock_init(&rdev->didt_idx_lock);
	spin_lock_init(&rdev->end_idx_lock);
	/* register mmio lives in BAR 5 on BONAIRE and newer, BAR 2 otherwise */
	if (rdev->family >= CHIP_BONAIRE) {
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
	} else {
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
	}
	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
	if (rdev->rmmio == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
	DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);

	/* doorbell bar mapping */
	if (rdev->family >= CHIP_BONAIRE)
		radeon_doorbell_init(rdev);

	/* io port mapping: use the first I/O BAR found */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
			break;
		}
	}
	if (rdev->rio_mem == NULL)
		DRM_ERROR("Unable to find PCI I/O BAR\n");

	/* if we have > 1 VGA cards, then disable the radeon VGA resources */
	/* this will fail for cards that aren't VGA class devices, just
	 * ignore it */
	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);

	/* runtime pm: explicitly enabled, or auto-enabled on PX systems */
	if (radeon_runtime_pm == 1)
		runtime = true;
	if ((radeon_runtime_pm == -1) && radeon_is_px())
		runtime = true;
	vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
	if (runtime)
		vga_switcheroo_init_domain_pm_ops(rdev->dev, &rdev->vga_pm_domain);

	r = radeon_init(rdev);
	if (r)
		return r;

	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	r = radeon_gem_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("registering gem debugfs failed (%d).\n", r);
	}

	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
		/* Acceleration not working on AGP card try again
		 * with fallback to PCI or PCIE GART
		 */
		radeon_asic_reset(rdev);
		radeon_fini(rdev);
		radeon_agp_disable(rdev);
		r = radeon_init(rdev);
		if (r)
			return r;
	}
	/* optional self tests / benchmarks, controlled by module params */
	if ((radeon_testing & 1)) {
		if (rdev->accel_working)
			radeon_test_moves(rdev);
		else
			DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
	}
	if ((radeon_testing & 2)) {
		if (rdev->accel_working)
			radeon_test_syncing(rdev);
		else
			DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
	}
	if (radeon_benchmarking) {
		if (rdev->accel_working)
			radeon_benchmark(rdev, radeon_benchmarking);
		else
			DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
	}
	return 0;
}
1359
4d8bf9ae
CK
1360static void radeon_debugfs_remove_files(struct radeon_device *rdev);
1361
0c195119
AD
/**
 * radeon_device_fini - tear down the driver
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the driver info (all asics).
 * Called at driver shutdown.
 * Unwinds radeon_device_init() in roughly reverse order.
 */
void radeon_device_fini(struct radeon_device *rdev)
{
	DRM_INFO("radeon: finishing device.\n");
	rdev->shutdown = true;
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);
	radeon_fini(rdev);
	/* drop the switcheroo and VGA arbiter registrations */
	vga_switcheroo_unregister_client(rdev->pdev);
	vga_client_register(rdev->pdev, NULL, NULL, NULL);
	if (rdev->rio_mem)
		pci_iounmap(rdev->pdev, rdev->rio_mem);
	rdev->rio_mem = NULL;
	iounmap(rdev->rmmio);
	rdev->rmmio = NULL;
	/* doorbell bar was only mapped on BONAIRE and newer */
	if (rdev->family >= CHIP_BONAIRE)
		radeon_doorbell_fini(rdev);
	radeon_debugfs_remove_files(rdev);
}
1388
1389
1390/*
1391 * Suspend & resume.
1392 */
0c195119
AD
/**
 * radeon_suspend_kms - initiate device suspend
 *
 * @dev: drm dev pointer
 * @suspend: also power down the PCI device (D3hot) if true
 * @fbcon: also put the fbdev console to sleep if true
 *
 * Puts the hw in the suspend state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */
int radeon_suspend_kms(struct drm_device *dev, bool suspend, bool fbcon)
{
	struct radeon_device *rdev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	int i, r;
	bool force_completion = false;

	if (dev == NULL || dev->dev_private == NULL) {
		return -ENODEV;
	}

	rdev = dev->dev_private;

	/* nothing to do if switcheroo already powered the card down */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	drm_kms_helper_poll_disable(dev);

	/* turn off display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
	}

	/* unpin the front buffers */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
		struct radeon_bo *robj;

		if (rfb == NULL || rfb->obj == NULL) {
			continue;
		}
		robj = gem_to_radeon_bo(rfb->obj);
		/* don't unpin kernel fb objects */
		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				radeon_bo_unpin(robj);
				radeon_bo_unreserve(robj);
			}
		}
	}
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);

	mutex_lock(&rdev->ring_lock);
	/* wait for gpu to finish processing current batch */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		r = radeon_fence_wait_empty_locked(rdev, i);
		if (r) {
			/* delay GPU reset to resume */
			force_completion = true;
		}
	}
	if (force_completion) {
		radeon_fence_driver_force_completion(rdev);
	}
	mutex_unlock(&rdev->ring_lock);

	radeon_save_bios_scratch_regs(rdev);

	radeon_pm_suspend(rdev);
	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);
	/* evict remaining vram memory */
	radeon_bo_evict_vram(rdev);

	radeon_agp_suspend(rdev);

	pci_save_state(dev->pdev);
	if (suspend) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}

	if (fbcon) {
		/* console_lock() serializes against fbcon while we sleep it */
		console_lock();
		radeon_fbdev_set_suspend(rdev, 1);
		console_unlock();
	}
	return 0;
}
1486
0c195119
AD
/**
 * radeon_resume_kms - initiate device resume
 *
 * @dev: drm dev pointer
 * @resume: also re-enable the PCI device (back to D0) if true
 * @fbcon: also wake the fbdev console if true
 *
 * Bring the hw back to operating state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver resume.
 */
int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
{
	struct drm_connector *connector;
	struct radeon_device *rdev = dev->dev_private;
	int r;

	/* nothing to do if switcheroo has the card powered down */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	if (fbcon) {
		console_lock();
	}
	if (resume) {
		pci_set_power_state(dev->pdev, PCI_D0);
		pci_restore_state(dev->pdev);
		if (pci_enable_device(dev->pdev)) {
			/* don't leave the console locked on the error path */
			if (fbcon)
				console_unlock();
			return -1;
		}
	}
	/* resume AGP if in use */
	radeon_agp_resume(rdev);
	radeon_resume(rdev);

	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	radeon_pm_resume(rdev);
	radeon_restore_bios_scratch_regs(rdev);

	if (fbcon) {
		radeon_fbdev_set_suspend(rdev, 0);
		console_unlock();
	}

	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);
		/* turn on the BL */
		if (rdev->mode_info.bl_encoder) {
			u8 bl_level = radeon_get_backlight_level(rdev,
								 rdev->mode_info.bl_encoder);
			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
						   bl_level);
		}
	}
	/* reset hpd state */
	radeon_hpd_init(rdev);
	/* blat the mode back in */
	drm_helper_resume_force_mode(dev);
	/* turn on display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
	}

	drm_kms_helper_poll_enable(dev);
	return 0;
}
1557
0c195119
AD
/**
 * radeon_gpu_reset - reset the asic
 *
 * @rdev: radeon device pointer
 *
 * Attempt the reset the GPU if it has hung (all asics).
 * Returns 0 for success or an error on failure.
 */
int radeon_gpu_reset(struct radeon_device *rdev)
{
	unsigned ring_sizes[RADEON_NUM_RINGS];
	uint32_t *ring_data[RADEON_NUM_RINGS];

	bool saved = false;

	int i, r;
	int resched;

	/* exclusive_lock keeps all other users out during the reset */
	down_write(&rdev->exclusive_lock);

	if (!rdev->needs_reset) {
		up_write(&rdev->exclusive_lock);
		return 0;
	}

	rdev->needs_reset = false;

	radeon_save_bios_scratch_regs(rdev);
	/* block TTM */
	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
	radeon_pm_suspend(rdev);
	radeon_suspend(rdev);

	/* save any commands still queued on the rings so they can be
	 * replayed after the reset */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
						   &ring_data[i]);
		if (ring_sizes[i]) {
			saved = true;
			dev_info(rdev->dev, "Saved %d dwords of commands "
				 "on ring %d.\n", ring_sizes[i], i);
		}
	}

retry:
	r = radeon_asic_reset(rdev);
	if (!r) {
		dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
		radeon_resume(rdev);
	}

	radeon_restore_bios_scratch_regs(rdev);

	if (!r) {
		/* replay the saved ring contents; radeon_ring_restore also
		 * takes ownership of (frees) ring_data[i] */
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			radeon_ring_restore(rdev, &rdev->ring[i],
					    ring_sizes[i], ring_data[i]);
			ring_sizes[i] = 0;
			ring_data[i] = NULL;
		}

		r = radeon_ib_ring_tests(rdev);
		if (r) {
			dev_err(rdev->dev, "ib ring test failed (%d).\n", r);
			if (saved) {
				/* the replayed commands may be what hung the
				 * GPU; retry once without restoring them */
				saved = false;
				radeon_suspend(rdev);
				goto retry;
			}
		}
	} else {
		/* reset failed: release waiters and the saved ring data */
		radeon_fence_driver_force_completion(rdev);
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			kfree(ring_data[i]);
		}
	}

	radeon_pm_resume(rdev);
	drm_helper_resume_force_mode(rdev->ddev);

	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
	if (r) {
		/* bad news, how to tell it to userspace ? */
		dev_info(rdev->dev, "GPU reset failed\n");
	}

	up_write(&rdev->exclusive_lock);
	return r;
}
1646
771fe6b9
JG
1647
1648/*
1649 * Debugfs
1650 */
771fe6b9
JG
1651int radeon_debugfs_add_files(struct radeon_device *rdev,
1652 struct drm_info_list *files,
1653 unsigned nfiles)
1654{
1655 unsigned i;
1656
4d8bf9ae
CK
1657 for (i = 0; i < rdev->debugfs_count; i++) {
1658 if (rdev->debugfs[i].files == files) {
771fe6b9
JG
1659 /* Already registered */
1660 return 0;
1661 }
1662 }
c245cb9e 1663
4d8bf9ae 1664 i = rdev->debugfs_count + 1;
c245cb9e
MW
1665 if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
1666 DRM_ERROR("Reached maximum number of debugfs components.\n");
1667 DRM_ERROR("Report so we increase "
1668 "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
771fe6b9
JG
1669 return -EINVAL;
1670 }
4d8bf9ae
CK
1671 rdev->debugfs[rdev->debugfs_count].files = files;
1672 rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
1673 rdev->debugfs_count = i;
771fe6b9
JG
1674#if defined(CONFIG_DEBUG_FS)
1675 drm_debugfs_create_files(files, nfiles,
1676 rdev->ddev->control->debugfs_root,
1677 rdev->ddev->control);
1678 drm_debugfs_create_files(files, nfiles,
1679 rdev->ddev->primary->debugfs_root,
1680 rdev->ddev->primary);
1681#endif
1682 return 0;
1683}
1684
4d8bf9ae
CK
/* Remove every registered debugfs file list from both DRM minors. */
static void radeon_debugfs_remove_files(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned idx;

	for (idx = 0; idx < rdev->debugfs_count; idx++) {
		struct drm_info_list *files = rdev->debugfs[idx].files;
		unsigned num_files = rdev->debugfs[idx].num_files;

		drm_debugfs_remove_files(files, num_files,
					 rdev->ddev->control);
		drm_debugfs_remove_files(files, num_files,
					 rdev->ddev->primary);
	}
#endif
}
1700
771fe6b9
JG
#if defined(CONFIG_DEBUG_FS)
/* Minor-level debugfs init hook; per-component files are registered
 * through radeon_debugfs_add_files() instead, so nothing to do here. */
int radeon_debugfs_init(struct drm_minor *minor)
{
	return 0;
}

/* Minor-level debugfs cleanup hook; intentionally empty, teardown is
 * handled by radeon_debugfs_remove_files(). */
void radeon_debugfs_cleanup(struct drm_minor *minor)
{
}
#endif
This page took 0.472065 seconds and 5 git commands to generate.