Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
[deliverable/linux.git] / drivers / gpu / drm / radeon / radeon_device.c
CommitLineData
771fe6b9
JG
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#include <linux/console.h>
5a0e3ad6 29#include <linux/slab.h>
771fe6b9
JG
30#include <drm/drmP.h>
31#include <drm/drm_crtc_helper.h>
32#include <drm/radeon_drm.h>
b8751946 33#include <linux/pm_runtime.h>
28d52043 34#include <linux/vgaarb.h>
6a9ee8af 35#include <linux/vga_switcheroo.h>
bcc65fd8 36#include <linux/efi.h>
771fe6b9
JG
37#include "radeon_reg.h"
38#include "radeon.h"
771fe6b9
JG
39#include "atom.h"
40
1b5331d9
JG
/* Human-readable ASIC names, each NUL-padded to 16 bytes.
 * NOTE(review): presumably indexed by rdev->family (enum radeon_family),
 * so entry order must match that enum -- confirm against radeon_family.h.
 */
static const char radeon_family_name[][16] = {
	"R100",
	"RV100",
	"RS100",
	"RV200",
	"RS200",
	"R200",
	"RV250",
	"RS300",
	"RV280",
	"R300",
	"R350",
	"RV350",
	"RV380",
	"R420",
	"R423",
	"RV410",
	"RS400",
	"RS480",
	"RS600",
	"RS690",
	"RS740",
	"RV515",
	"R520",
	"RV530",
	"RV560",
	"RV570",
	"R580",
	"R600",
	"RV610",
	"RV630",
	"RV670",
	"RV620",
	"RV635",
	"RS780",
	"RS880",
	"RV770",
	"RV730",
	"RV710",
	"RV740",
	"CEDAR",
	"REDWOOD",
	"JUNIPER",
	"CYPRESS",
	"HEMLOCK",
	"PALM",
	"SUMO",
	"SUMO2",
	"BARTS",
	"TURKS",
	"CAICOS",
	"CAYMAN",
	"ARUBA",
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"OLAND",
	"HAINAN",
	"BONAIRE",
	"KAVERI",
	"KABINI",
	"HAWAII",
	"MULLINS",
	"LAST",	/* sentinel */
};
106
4807c5a8
AD
/* PX (PowerXpress hybrid graphics) quirk flags */
#define RADEON_PX_QUIRK_DISABLE_PX (1 << 0) /* never run PX on this system; clears RADEON_IS_PX */
#define RADEON_PX_QUIRK_LONG_WAKEUP (1 << 1) /* dGPU needs extra wakeup time; consumer not visible in this chunk */

/* One PX quirk table entry: PCI ids identifying an affected machine
 * plus the RADEON_PX_QUIRK_* flags to apply to it. */
struct radeon_px_quirk {
	u32 chip_vendor;	/* PCI vendor id of the GPU */
	u32 chip_device;	/* PCI device id of the GPU */
	u32 subsys_vendor;	/* PCI subsystem vendor id */
	u32 subsys_device;	/* PCI subsystem device id */
	u32 px_quirk_flags;	/* RADEON_PX_QUIRK_* bits */
};
117
/* Table of machines needing PX quirks; matched against the PCI ids of the
 * device in radeon_device_handle_px_quirks(). */
static struct radeon_px_quirk radeon_px_quirk_list[] = {
	/* Acer aspire 5560g (CPU: AMD A4-3305M; GPU: AMD Radeon HD 6480g + 7470m)
	 * https://bugzilla.kernel.org/show_bug.cgi?id=74551
	 */
	{ PCI_VENDOR_ID_ATI, 0x6760, 0x1025, 0x0672, RADEON_PX_QUIRK_DISABLE_PX },
	/* Asus K73TA laptop with AMD A6-3400M APU and Radeon 6550 GPU
	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
	 */
	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX },
	/* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
	 */
	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
	/* macbook pro 8.2 */
	{ PCI_VENDOR_ID_ATI, 0x6741, PCI_VENDOR_ID_APPLE, 0x00e2, RADEON_PX_QUIRK_LONG_WAKEUP },
	/* terminator: chip_device == 0 ends the scan */
	{ 0, 0, 0, 0, 0 },
};
135
90c4cde9
AD
136bool radeon_is_px(struct drm_device *dev)
137{
138 struct radeon_device *rdev = dev->dev_private;
139
140 if (rdev->flags & RADEON_IS_PX)
141 return true;
142 return false;
143}
10ebc0bc 144
4807c5a8
AD
145static void radeon_device_handle_px_quirks(struct radeon_device *rdev)
146{
147 struct radeon_px_quirk *p = radeon_px_quirk_list;
148
149 /* Apply PX quirks */
150 while (p && p->chip_device != 0) {
151 if (rdev->pdev->vendor == p->chip_vendor &&
152 rdev->pdev->device == p->chip_device &&
153 rdev->pdev->subsystem_vendor == p->subsys_vendor &&
154 rdev->pdev->subsystem_device == p->subsys_device) {
155 rdev->px_quirk_flags = p->px_quirk_flags;
156 break;
157 }
158 ++p;
159 }
160
161 if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX)
162 rdev->flags &= ~RADEON_IS_PX;
163}
164
2e1b65f9
AD
165/**
166 * radeon_program_register_sequence - program an array of registers.
167 *
168 * @rdev: radeon_device pointer
169 * @registers: pointer to the register array
170 * @array_size: size of the register array
171 *
172 * Programs an array or registers with and and or masks.
173 * This is a helper for setting golden registers.
174 */
175void radeon_program_register_sequence(struct radeon_device *rdev,
176 const u32 *registers,
177 const u32 array_size)
178{
179 u32 tmp, reg, and_mask, or_mask;
180 int i;
181
182 if (array_size % 3)
183 return;
184
185 for (i = 0; i < array_size; i +=3) {
186 reg = registers[i + 0];
187 and_mask = registers[i + 1];
188 or_mask = registers[i + 2];
189
190 if (and_mask == 0xffffffff) {
191 tmp = or_mask;
192 } else {
193 tmp = RREG32(reg);
194 tmp &= ~and_mask;
195 tmp |= or_mask;
196 }
197 WREG32(reg, tmp);
198 }
199}
200
1a0041b8
AD
/**
 * radeon_pci_config_reset - reset the asic via PCI config space
 *
 * @rdev: radeon_device pointer
 *
 * Triggers an asic reset by writing RADEON_ASIC_RESET_DATA to PCI
 * config register 0x7c.
 */
void radeon_pci_config_reset(struct radeon_device *rdev)
{
	pci_write_config_dword(rdev->pdev, 0x7c, RADEON_ASIC_RESET_DATA);
}
205
0c195119
AD
206/**
207 * radeon_surface_init - Clear GPU surface registers.
208 *
209 * @rdev: radeon_device pointer
210 *
211 * Clear GPU surface registers (r1xx-r5xx).
b1e3a6d1 212 */
3ce0a23d 213void radeon_surface_init(struct radeon_device *rdev)
b1e3a6d1
MD
214{
215 /* FIXME: check this out */
216 if (rdev->family < CHIP_R600) {
217 int i;
218
550e2d92
DA
219 for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
220 if (rdev->surface_regs[i].bo)
221 radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
222 else
223 radeon_clear_surface_reg(rdev, i);
b1e3a6d1 224 }
e024e110
DA
225 /* enable surfaces */
226 WREG32(RADEON_SURFACE_CNTL, 0);
b1e3a6d1
MD
227 }
228}
229
771fe6b9
JG
230/*
231 * GPU scratch registers helpers function.
232 */
0c195119
AD
233/**
234 * radeon_scratch_init - Init scratch register driver information.
235 *
236 * @rdev: radeon_device pointer
237 *
238 * Init CP scratch register driver information (r1xx-r5xx)
239 */
3ce0a23d 240void radeon_scratch_init(struct radeon_device *rdev)
771fe6b9
JG
241{
242 int i;
243
244 /* FIXME: check this out */
245 if (rdev->family < CHIP_R300) {
246 rdev->scratch.num_reg = 5;
247 } else {
248 rdev->scratch.num_reg = 7;
249 }
724c80e1 250 rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
771fe6b9
JG
251 for (i = 0; i < rdev->scratch.num_reg; i++) {
252 rdev->scratch.free[i] = true;
724c80e1 253 rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
771fe6b9
JG
254 }
255}
256
0c195119
AD
257/**
258 * radeon_scratch_get - Allocate a scratch register
259 *
260 * @rdev: radeon_device pointer
261 * @reg: scratch register mmio offset
262 *
263 * Allocate a CP scratch register for use by the driver (all asics).
264 * Returns 0 on success or -EINVAL on failure.
265 */
771fe6b9
JG
266int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
267{
268 int i;
269
270 for (i = 0; i < rdev->scratch.num_reg; i++) {
271 if (rdev->scratch.free[i]) {
272 rdev->scratch.free[i] = false;
273 *reg = rdev->scratch.reg[i];
274 return 0;
275 }
276 }
277 return -EINVAL;
278}
279
0c195119
AD
280/**
281 * radeon_scratch_free - Free a scratch register
282 *
283 * @rdev: radeon_device pointer
284 * @reg: scratch register mmio offset
285 *
286 * Free a CP scratch register allocated for use by the driver (all asics)
287 */
771fe6b9
JG
288void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
289{
290 int i;
291
292 for (i = 0; i < rdev->scratch.num_reg; i++) {
293 if (rdev->scratch.reg[i] == reg) {
294 rdev->scratch.free[i] = true;
295 return;
296 }
297 }
298}
299
75efdee1
AD
300/*
301 * GPU doorbell aperture helpers function.
302 */
303/**
304 * radeon_doorbell_init - Init doorbell driver information.
305 *
306 * @rdev: radeon_device pointer
307 *
308 * Init doorbell driver information (CIK)
309 * Returns 0 on success, error on failure.
310 */
28f5a6cd 311static int radeon_doorbell_init(struct radeon_device *rdev)
75efdee1 312{
75efdee1
AD
313 /* doorbell bar mapping */
314 rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
315 rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);
316
d5754ab8
AL
317 rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS);
318 if (rdev->doorbell.num_doorbells == 0)
319 return -EINVAL;
75efdee1 320
d5754ab8 321 rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32));
75efdee1
AD
322 if (rdev->doorbell.ptr == NULL) {
323 return -ENOMEM;
324 }
325 DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
326 DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);
327
d5754ab8 328 memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used));
75efdee1 329
75efdee1
AD
330 return 0;
331}
332
/**
 * radeon_doorbell_fini - Tear down doorbell driver information.
 *
 * @rdev: radeon_device pointer
 *
 * Tear down doorbell driver information (CIK):
 * unmaps the aperture mapped by radeon_doorbell_init().
 */
static void radeon_doorbell_fini(struct radeon_device *rdev)
{
	iounmap(rdev->doorbell.ptr);
	/* NULL the pointer to guard against use-after-unmap */
	rdev->doorbell.ptr = NULL;
}
345
346/**
d5754ab8 347 * radeon_doorbell_get - Allocate a doorbell entry
75efdee1
AD
348 *
349 * @rdev: radeon_device pointer
d5754ab8 350 * @doorbell: doorbell index
75efdee1 351 *
d5754ab8 352 * Allocate a doorbell for use by the driver (all asics).
75efdee1
AD
353 * Returns 0 on success or -EINVAL on failure.
354 */
355int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
356{
d5754ab8
AL
357 unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells);
358 if (offset < rdev->doorbell.num_doorbells) {
359 __set_bit(offset, rdev->doorbell.used);
360 *doorbell = offset;
361 return 0;
362 } else {
363 return -EINVAL;
75efdee1 364 }
75efdee1
AD
365}
366
367/**
d5754ab8 368 * radeon_doorbell_free - Free a doorbell entry
75efdee1
AD
369 *
370 * @rdev: radeon_device pointer
d5754ab8 371 * @doorbell: doorbell index
75efdee1 372 *
d5754ab8 373 * Free a doorbell allocated for use by the driver (all asics)
75efdee1
AD
374 */
375void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
376{
d5754ab8
AL
377 if (doorbell < rdev->doorbell.num_doorbells)
378 __clear_bit(doorbell, rdev->doorbell.used);
75efdee1
AD
379}
380
ebff8453
OG
381/**
382 * radeon_doorbell_get_kfd_info - Report doorbell configuration required to
383 * setup KFD
384 *
385 * @rdev: radeon_device pointer
386 * @aperture_base: output returning doorbell aperture base physical address
387 * @aperture_size: output returning doorbell aperture size in bytes
388 * @start_offset: output returning # of doorbell bytes reserved for radeon.
389 *
390 * Radeon and the KFD share the doorbell aperture. Radeon sets it up,
391 * takes doorbells required for its own rings and reports the setup to KFD.
392 * Radeon reserved doorbells are at the start of the doorbell aperture.
393 */
394void radeon_doorbell_get_kfd_info(struct radeon_device *rdev,
395 phys_addr_t *aperture_base,
396 size_t *aperture_size,
397 size_t *start_offset)
398{
399 /* The first num_doorbells are used by radeon.
400 * KFD takes whatever's left in the aperture. */
401 if (rdev->doorbell.size > rdev->doorbell.num_doorbells * sizeof(u32)) {
402 *aperture_base = rdev->doorbell.base;
403 *aperture_size = rdev->doorbell.size;
404 *start_offset = rdev->doorbell.num_doorbells * sizeof(u32);
405 } else {
406 *aperture_base = 0;
407 *aperture_size = 0;
408 *start_offset = 0;
409 }
410}
411
0c195119
AD
412/*
413 * radeon_wb_*()
414 * Writeback is the the method by which the the GPU updates special pages
415 * in memory with the status of certain GPU events (fences, ring pointers,
416 * etc.).
417 */
418
/**
 * radeon_wb_disable - Disable Writeback
 *
 * @rdev: radeon_device pointer
 *
 * Disables Writeback (all asics). Used for suspend.
 * NOTE(review): consumers of wb.enabled presumably fall back to
 * direct register reads when it is false -- they are not visible here.
 */
void radeon_wb_disable(struct radeon_device *rdev)
{
	rdev->wb.enabled = false;
}
430
0c195119
AD
/**
 * radeon_wb_fini - Disable Writeback and free memory
 *
 * @rdev: radeon_device pointer
 *
 * Disables Writeback and frees the Writeback memory (all asics).
 * Used at driver shutdown.
 */
void radeon_wb_fini(struct radeon_device *rdev)
{
	radeon_wb_disable(rdev);
	if (rdev->wb.wb_obj) {
		/* kunmap/unpin require holding the reservation; if reserving
		 * fails we still drop the BO reference below */
		if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
			radeon_bo_kunmap(rdev->wb.wb_obj);
			radeon_bo_unpin(rdev->wb.wb_obj);
			radeon_bo_unreserve(rdev->wb.wb_obj);
		}
		radeon_bo_unref(&rdev->wb.wb_obj);
		/* clear cached CPU pointer and handle after the unref */
		rdev->wb.wb = NULL;
		rdev->wb.wb_obj = NULL;
	}
}
453
0c195119
AD
/**
 * radeon_wb_init - Init Writeback driver info and allocate memory
 *
 * @rdev: radeon_device pointer
 *
 * Allocates, pins, and maps the Writeback page, then decides whether
 * writeback and event-write fences are used for this asic (all asics).
 * Used at driver startup.
 * Returns 0 on success or an -error on failure.
 */
int radeon_wb_init(struct radeon_device *rdev)
{
	int r;

	/* allocate only once; on re-init (e.g. resume) the BO is reused */
	if (rdev->wb.wb_obj == NULL) {
		/* one GPU page in GTT, pinned for the driver's lifetime */
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
				     &rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0)) {
			/* radeon_wb_fini() unwinds the partial setup */
			radeon_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
				  &rdev->wb.gpu_addr);
		if (r) {
			radeon_bo_unreserve(rdev->wb.wb_obj);
			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
			radeon_wb_fini(rdev);
			return r;
		}
		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
		radeon_bo_unreserve(rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
			radeon_wb_fini(rdev);
			return r;
		}
	}

	/* clear wb memory */
	memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
	/* disable event_write fences */
	rdev->wb.use_event = false;
	/* disabled via module param */
	if (radeon_no_wb == 1) {
		rdev->wb.enabled = false;
	} else {
		if (rdev->flags & RADEON_IS_AGP) {
			/* often unreliable on AGP */
			rdev->wb.enabled = false;
		} else if (rdev->family < CHIP_R300) {
			/* often unreliable on pre-r300 */
			rdev->wb.enabled = false;
		} else {
			rdev->wb.enabled = true;
			/* event_write fences are only available on r600+ */
			if (rdev->family >= CHIP_R600) {
				rdev->wb.use_event = true;
			}
		}
	}
	/* always use writeback/events on NI, APUs -- overrides the above,
	 * including the radeon_no_wb module parameter */
	if (rdev->family >= CHIP_PALM) {
		rdev->wb.enabled = true;
		rdev->wb.use_event = true;
	}

	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");

	return 0;
}
529
d594e46a
JG
/**
 * radeon_vram_location - try to find VRAM location
 * @rdev: radeon device structure holding all necessary informations
 * @mc: memory controller structure holding memory informations
 * @base: base address at which to put VRAM
 *
 * Function will try to place VRAM at the base address provided
 * as parameter (which is so far either PCI aperture address or
 * for IGP TOM base address).
 *
 * If there is not enough space to fit the unvisible VRAM in the 32bits
 * address space then we limit the VRAM size to the aperture.
 *
 * If we are using AGP and if the AGP aperture doesn't allow us to have
 * room for all the VRAM than we restrict the VRAM to the PCI aperture
 * size and print a warning.
 *
 * This function will never fail, worst case is limiting VRAM.
 *
 * Note: GTT start, end, size should be initialized before calling this
 * function on AGP platform.
 *
 * Note: We don't explicitly enforce VRAM start to be aligned on VRAM size,
 * this shouldn't be a problem as we are using the PCI aperture as a reference.
 * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
 * not IGP.
 *
 * Note: we use mc_vram_size as on some board we need to program the mc to
 * cover the whole aperture even if VRAM size is inferior to aperture size
 * Novell bug 204882 + along with lots of ubuntu ones
 *
 * Note: when limiting vram it's safe to overwrite real_vram_size because
 * we are not in case where real_vram_size is inferior to mc_vram_size (ie
 * note affected by bogus hw of Novell bug 204882 + along with lots of ubuntu
 * ones)
 *
 * Note: IGP TOM addr should be the same as the aperture addr, we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
	/* user-requested VRAM limit (module param, in MB) converted to bytes */
	uint64_t limit = (uint64_t)radeon_vram_limit << 20;

	mc->vram_start = base;
	/* VRAM must fit inside the MC's addressable range */
	if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	/* on AGP, VRAM must not overlap the (already placed) GTT range */
	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	/* recompute the end in case mc_vram_size was clamped above */
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (limit && limit < mc->real_vram_size)
		mc->real_vram_size = limit;
	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
			mc->mc_vram_size >> 20, mc->vram_start,
			mc->vram_end, mc->real_vram_size >> 20);
}
771fe6b9 594
d594e46a
JG
/**
 * radeon_gtt_location - try to find GTT location
 * @rdev: radeon device structure holding all necessary informations
 * @mc: memory controller structure holding memory informations
 *
 * Function will try to place GTT before or after VRAM.
 *
 * If GTT size is bigger than space left then we adjust GTT size.
 * Thus function will never fail.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_af, size_bf;

	/* aligned space available after (af) and before (bf) VRAM;
	 * gtt_base_align is an alignment mask (align - 1) */
	size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		/* place GTT before VRAM, ending at the aligned VRAM start */
		if (mc->gtt_size > size_bf) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
	} else {
		/* place GTT after VRAM, at the first aligned address */
		if (mc->gtt_size > size_af) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}
630
771fe6b9
JG
631/*
632 * GPU helpers function.
633 */
05082b8b
AD
634
/**
 * radeon_device_is_virtual - check if we are running in a virtual environment
 *
 * Check if the asic has been passed through to a VM (all asics).
 * Used at driver startup.
 * Returns true if virtual or false if not.
 */
static bool radeon_device_is_virtual(void)
{
#ifdef CONFIG_X86
	/* hypervisors advertise themselves via the CPUID hypervisor bit */
	return boot_cpu_has(X86_FEATURE_HYPERVISOR);
#else
	/* no generic detection available on non-x86; assume bare metal */
	return false;
#endif
}
650
0c195119
AD
/**
 * radeon_card_posted - check if the hw has already been initialized
 *
 * @rdev: radeon_device pointer
 *
 * Check if the asic has been initialized (all asics) by looking for
 * enabled CRTCs and then a programmed memory size register.
 * Used at driver startup.
 * Returns true if initialized or false if not.
 */
bool radeon_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* for pass through, always force asic_init */
	if (radeon_device_is_virtual())
		return false;

	/* required for EFI mode on macbook2,1 which uses an r5xx asic */
	if (efi_enabled(EFI_BOOT) &&
	    (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
	    (rdev->family < CHIP_R600))
		return false;

	/* no display engine: skip straight to the memory-size check */
	if (ASIC_IS_NODCE(rdev))
		goto check_memsize;

	/* first check CRTCs */
	if (ASIC_IS_DCE4(rdev)) {
		/* DCE4+: OR together the control regs of all present CRTCs */
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
		if (rdev->num_crtc >= 4) {
			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
				RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
		}
		if (rdev->num_crtc >= 6) {
			reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
				RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
		}
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_AVIVO(rdev)) {
		/* AVIVO (r5xx-ish) display engine */
		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
		      RREG32(AVIVO_D2CRTC_CONTROL);
		if (reg & AVIVO_CRTC_EN) {
			return true;
		}
	} else {
		/* legacy display engine */
		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
		      RREG32(RADEON_CRTC2_GEN_CNTL);
		if (reg & RADEON_CRTC_EN) {
			return true;
		}
	}

check_memsize:
	/* then check MEM_SIZE, in case the crtcs are off */
	if (rdev->family >= CHIP_R600)
		reg = RREG32(R600_CONFIG_MEMSIZE);
	else
		reg = RREG32(RADEON_CONFIG_MEMSIZE);

	/* a non-zero memory size means the vbios has run */
	if (reg)
		return true;

	return false;

}
718
0c195119
AD
/**
 * radeon_update_bandwidth_info - update display bandwidth params
 *
 * @rdev: radeon_device pointer
 *
 * Used when sclk/mclk are switched or display modes are set.
 * params are used to calculate display watermarks (all asics)
 */
void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
	fixed20_12 a;
	u32 sclk = rdev->pm.current_sclk;
	u32 mclk = rdev->pm.current_mclk;

	/* sclk/mclk in Mhz -- NOTE(review): raw clocks appear to be in
	 * units of 10 kHz, hence the divide by 100; confirm against pm code */
	a.full = dfixed_const(100);
	rdev->pm.sclk.full = dfixed_const(sclk);
	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
	rdev->pm.mclk.full = dfixed_const(mclk);
	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);

	if (rdev->flags & RADEON_IS_IGP) {
		a.full = dfixed_const(16);
		/* core_bandwidth = sclk(Mhz) * 16 */
		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
	}
}
746
0c195119
AD
747/**
748 * radeon_boot_test_post_card - check and possibly initialize the hw
749 *
750 * @rdev: radeon_device pointer
751 *
752 * Check if the asic is initialized and if not, attempt to initialize
753 * it (all asics).
754 * Returns true if initialized or false if not.
755 */
72542d77
DA
756bool radeon_boot_test_post_card(struct radeon_device *rdev)
757{
758 if (radeon_card_posted(rdev))
759 return true;
760
761 if (rdev->bios) {
762 DRM_INFO("GPU not posted. posting now...\n");
763 if (rdev->is_atom_bios)
764 atom_asic_init(rdev->mode_info.atom_context);
765 else
766 radeon_combios_asic_init(rdev->ddev);
767 return true;
768 } else {
769 dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
770 return false;
771 }
772}
773
0c195119
AD
/**
 * radeon_dummy_page_init - init dummy page used by the driver
 *
 * @rdev: radeon_device pointer
 *
 * Allocate the dummy page used by the driver (all asics).
 * This dummy page is used by the driver as a filler for gart entries
 * when pages are taken out of the GART.
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_dummy_page_init(struct radeon_device *rdev)
{
	/* already allocated (e.g. across a re-init): nothing to do */
	if (rdev->dummy_page.page)
		return 0;
	/* zeroed page below 4 GiB (GFP_DMA32) */
	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (rdev->dummy_page.page == NULL)
		return -ENOMEM;
	rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
					0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
		dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		/* unwind the allocation on mapping failure */
		__free_page(rdev->dummy_page.page);
		rdev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	/* precompute the GART entry encoding of the dummy page's address */
	rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr,
							    RADEON_GART_PAGE_DUMMY);
	return 0;
}
803
0c195119
AD
/**
 * radeon_dummy_page_fini - free dummy page used by the driver
 *
 * @rdev: radeon_device pointer
 *
 * Frees the dummy page used by the driver (all asics).
 * Safe to call when no dummy page was allocated.
 */
void radeon_dummy_page_fini(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page == NULL)
		return;
	/* unmap from the device before returning the page to the allocator */
	pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
			PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(rdev->dummy_page.page);
	rdev->dummy_page.page = NULL;
}
820
771fe6b9 821
771fe6b9 822/* ATOM accessor methods */
0c195119
AD
823/*
824 * ATOM is an interpreted byte code stored in tables in the vbios. The
825 * driver registers callbacks to access registers and the interpreter
826 * in the driver parses the tables and executes then to program specific
827 * actions (set display modes, asic init, etc.). See radeon_atombios.c,
828 * atombios.h, and atom.c
829 */
830
831/**
832 * cail_pll_read - read PLL register
833 *
834 * @info: atom card_info pointer
835 * @reg: PLL register offset
836 *
837 * Provides a PLL register accessor for the atom interpreter (r4xx+).
838 * Returns the value of the PLL register.
839 */
771fe6b9
JG
840static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
841{
842 struct radeon_device *rdev = info->dev->dev_private;
843 uint32_t r;
844
845 r = rdev->pll_rreg(rdev, reg);
846 return r;
847}
848
0c195119
AD
/**
 * cail_pll_write - write PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 * @val: value to write to the PLL register
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	/* delegate to the asic-specific PLL write hook */
	rdev->pll_wreg(rdev, reg, val);
}
864
0c195119
AD
865/**
866 * cail_mc_read - read MC (Memory Controller) register
867 *
868 * @info: atom card_info pointer
869 * @reg: MC register offset
870 *
871 * Provides an MC register accessor for the atom interpreter (r4xx+).
872 * Returns the value of the MC register.
873 */
771fe6b9
JG
874static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
875{
876 struct radeon_device *rdev = info->dev->dev_private;
877 uint32_t r;
878
879 r = rdev->mc_rreg(rdev, reg);
880 return r;
881}
882
0c195119
AD
/**
 * cail_mc_write - write MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 * @val: value to write to the MC register
 *
 * Provides a MC register accessor for the atom interpreter (r4xx+).
 */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	/* delegate to the asic-specific MC write hook */
	rdev->mc_wreg(rdev, reg, val);
}
898
0c195119
AD
/**
 * cail_reg_write - write MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset, in dwords
 * @val: value to write to the MMIO register
 *
 * Provides a MMIO register accessor for the atom interpreter (r4xx+).
 */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	/* ATOM register offsets are dword indices; convert to bytes */
	WREG32(reg*4, val);
}
914
0c195119
AD
915/**
916 * cail_reg_read - read MMIO register
917 *
918 * @info: atom card_info pointer
919 * @reg: MMIO register offset
920 *
921 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
922 * Returns the value of the MMIO register.
923 */
771fe6b9
JG
924static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
925{
926 struct radeon_device *rdev = info->dev->dev_private;
927 uint32_t r;
928
929 r = RREG32(reg*4);
930 return r;
931}
932
0c195119
AD
/**
 * cail_ioreg_write - write IO register
 *
 * @info: atom card_info pointer
 * @reg: IO register offset, in dwords
 * @val: value to write to the IO register
 *
 * Provides a IO register accessor for the atom interpreter (r4xx+).
 */
static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	/* ATOM register offsets are dword indices; convert to bytes */
	WREG32_IO(reg*4, val);
}
948
0c195119
AD
949/**
950 * cail_ioreg_read - read IO register
951 *
952 * @info: atom card_info pointer
953 * @reg: IO register offset
954 *
955 * Provides an IO register accessor for the atom interpreter (r4xx+).
956 * Returns the value of the IO register.
957 */
351a52a2
AD
958static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
959{
960 struct radeon_device *rdev = info->dev->dev_private;
961 uint32_t r;
962
963 r = RREG32_IO(reg*4);
964 return r;
965}
966
0c195119
AD
/**
 * radeon_atombios_init - init the driver info and callbacks for atombios
 *
 * @rdev: radeon_device pointer
 *
 * Initializes the driver info and register access callbacks for the
 * ATOM interpreter (r4xx+).
 * Returns 0 on success, -ENOMEM on failure.
 * Called at driver startup.
 */
int radeon_atombios_init(struct radeon_device *rdev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	rdev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = rdev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops */
	if (rdev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
		/* no PCI I/O BAR: fall back to the MMIO accessors */
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
	if (!rdev->mode_info.atom_context) {
		/* parse failure: the fini path frees atom_card_info too */
		radeon_atombios_fini(rdev);
		return -ENOMEM;
	}

	mutex_init(&rdev->mode_info.atom_context->mutex);
	mutex_init(&rdev->mode_info.atom_context->scratch_mutex);
	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
	return 0;
}
1015
0c195119
AD
1016/**
1017 * radeon_atombios_fini - free the driver info and callbacks for atombios
1018 *
1019 * @rdev: radeon_device pointer
1020 *
1021 * Frees the driver info and register access callbacks for the ATOM
1022 * interpreter (r4xx+).
1023 * Called at driver shutdown.
1024 */
771fe6b9
JG
1025void radeon_atombios_fini(struct radeon_device *rdev)
1026{
4a04a844
JG
1027 if (rdev->mode_info.atom_context) {
1028 kfree(rdev->mode_info.atom_context->scratch);
4a04a844 1029 }
0e34d094
TG
1030 kfree(rdev->mode_info.atom_context);
1031 rdev->mode_info.atom_context = NULL;
61c4b24b 1032 kfree(rdev->mode_info.atom_card_info);
0e34d094 1033 rdev->mode_info.atom_card_info = NULL;
771fe6b9
JG
1034}
1035
0c195119
AD
1036/* COMBIOS */
1037/*
1038 * COMBIOS is the bios format prior to ATOM. It provides
1039 * command tables similar to ATOM, but doesn't have a unified
1040 * parser. See radeon_combios.c
1041 */
1042
/**
 * radeon_combios_init - init the driver info for combios
 *
 * @rdev: radeon_device pointer
 *
 * Initializes the driver info for combios (r1xx-r3xx).
 * Returns 0 on success.
 * Called at driver startup.
 */
int radeon_combios_init(struct radeon_device *rdev)
{
	/* combios has no parser context to set up; just prime the
	 * BIOS scratch registers */
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
	return 0;
}
1057
0c195119
AD
/**
 * radeon_combios_fini - free the driver info for combios
 *
 * @rdev: radeon_device pointer
 *
 * Frees the driver info for combios (r1xx-r3xx).
 * Called at driver shutdown.
 */
void radeon_combios_fini(struct radeon_device *rdev)
{
	/* nothing was allocated in radeon_combios_init(), so nothing to free */
}
1069
0c195119
AD
1070/* if we get transitioned to only one device, take VGA back */
1071/**
1072 * radeon_vga_set_decode - enable/disable vga decode
1073 *
1074 * @cookie: radeon_device pointer
1075 * @state: enable/disable vga decode
1076 *
1077 * Enable/disable vga decode (all asics).
1078 * Returns VGA resource flags.
1079 */
28d52043
DA
1080static unsigned int radeon_vga_set_decode(void *cookie, bool state)
1081{
1082 struct radeon_device *rdev = cookie;
28d52043
DA
1083 radeon_vga_set_state(rdev, state);
1084 if (state)
1085 return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1086 VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1087 else
1088 return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1089}
c1176d6f 1090
1bcb04f7
CK
/**
 * radeon_check_pot_argument - check that argument is a power of two
 *
 * @arg: value to check
 *
 * Validates that a certain argument is a power of two (all asics).
 * Returns true if argument is valid (note: 0 also passes this check,
 * matching the historical behavior callers rely on).
 */
static bool radeon_check_pot_argument(int arg)
{
	/* a power of two has a single bit set, so clearing the lowest
	 * set bit must leave zero */
	return !(arg & (arg - 1));
}
1103
5e3c4f90
GG
1104/**
1105 * Determine a sensible default GART size according to ASIC family.
1106 *
1107 * @family ASIC family name
1108 */
1109static int radeon_gart_size_auto(enum radeon_family family)
1110{
1111 /* default to a larger gart size on newer asics */
1112 if (family >= CHIP_TAHITI)
1113 return 2048;
1114 else if (family >= CHIP_RV770)
1115 return 1024;
1116 else
1117 return 512;
1118}
1119
0c195119
AD
/**
 * radeon_check_arguments - validate module params
 *
 * @rdev: radeon_device pointer
 *
 * Validates certain module parameters and updates
 * the associated values used by the driver (all asics).
 * Invalid values are clamped or replaced with safe defaults and a
 * warning is logged; this function never fails.
 */
static void radeon_check_arguments(struct radeon_device *rdev)
{
	/* vramlimit must be a power of two */
	if (!radeon_check_pot_argument(radeon_vram_limit)) {
		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
			 radeon_vram_limit);
		radeon_vram_limit = 0;
	}

	/* -1 means "auto": choose per ASIC family */
	if (radeon_gart_size == -1) {
		radeon_gart_size = radeon_gart_size_auto(rdev->family);
	}
	/* gtt size must be power of two and greater or equal to 32M */
	if (radeon_gart_size < 32) {
		dev_warn(rdev->dev, "gart size (%d) too small\n",
			 radeon_gart_size);
		radeon_gart_size = radeon_gart_size_auto(rdev->family);
	} else if (!radeon_check_pot_argument(radeon_gart_size)) {
		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
			 radeon_gart_size);
		radeon_gart_size = radeon_gart_size_auto(rdev->family);
	}
	/* radeon_gart_size is in MB; gtt_size is in bytes */
	rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;

	/* AGP mode can only be -1, 1, 2, 4, 8 */
	switch (radeon_agpmode) {
	case -1:
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
			 "-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
		radeon_agpmode = 0;
		break;
	}

	/* VM size (in GB) must be a power of two */
	if (!radeon_check_pot_argument(radeon_vm_size)) {
		dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
			 radeon_vm_size);
		radeon_vm_size = 4;
	}

	if (radeon_vm_size < 1) {
		dev_warn(rdev->dev, "VM size (%d) too small, min is 1GB\n",
			 radeon_vm_size);
		radeon_vm_size = 4;
	}

	/*
	 * Max GPUVM size for Cayman, SI and CI are 40 bits.
	 */
	if (radeon_vm_size > 1024) {
		dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
			 radeon_vm_size);
		radeon_vm_size = 4;
	}

	/* defines number of bits in page table versus page directory,
	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
	 * page table and the remaining bits are in the page directory */
	if (radeon_vm_block_size == -1) {

		/* Total bits covered by PD + PTs */
		unsigned bits = ilog2(radeon_vm_size) + 18;

		/* Make sure the PD is 4K in size up to 8GB address space.
		   Above that split equal between PD and PTs */
		if (radeon_vm_size <= 8)
			radeon_vm_block_size = bits - 9;
		else
			radeon_vm_block_size = (bits + 3) / 2;

	} else if (radeon_vm_block_size < 9) {
		dev_warn(rdev->dev, "VM page table size (%d) too small\n",
			 radeon_vm_block_size);
		radeon_vm_block_size = 9;
	}

	/* block size must also be small enough that the whole VM space
	 * fits: radeon_vm_size is in GB, hence the * 1024 (MB) scaling */
	if (radeon_vm_block_size > 24 ||
	    (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
		dev_warn(rdev->dev, "VM page table size (%d) too large\n",
			 radeon_vm_block_size);
		radeon_vm_block_size = 9;
	}
}
1217
0c195119
AD
/**
 * radeon_switcheroo_set_state - set switcheroo state
 *
 * @pdev: pci dev pointer
 * @state: vga_switcheroo state
 *
 * Callback for the switcheroo driver. Suspends or resumes the
 * asics before or after it is powered up using ACPI methods.
 */
static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct radeon_device *rdev = dev->dev_private;

	/* PX (PowerXpress) systems are powered down via runtime PM instead */
	if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF)
		return;

	if (state == VGA_SWITCHEROO_ON) {
		/* remember the original D3 transition delay so it can be
		 * restored after the quirk below */
		unsigned d3_delay = dev->pdev->d3_delay;

		printk(KERN_INFO "radeon: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;

		/* some boards need a longer wakeup delay */
		if (d3_delay < 20 && (rdev->px_quirk_flags & RADEON_PX_QUIRK_LONG_WAKEUP))
			dev->pdev->d3_delay = 20;

		radeon_resume_kms(dev, true, true);

		dev->pdev->d3_delay = d3_delay;

		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		printk(KERN_INFO "radeon: switched off\n");
		/* stop output polling before the hardware goes away */
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		radeon_suspend_kms(dev, true, true, false);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}
1259
0c195119
AD
1260/**
1261 * radeon_switcheroo_can_switch - see if switcheroo state can change
1262 *
1263 * @pdev: pci dev pointer
1264 *
1265 * Callback for the switcheroo driver. Check of the switcheroo
1266 * state can be changed.
1267 * Returns true if the state can be changed, false if not.
1268 */
6a9ee8af
DA
1269static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
1270{
1271 struct drm_device *dev = pci_get_drvdata(pdev);
6a9ee8af 1272
fc8fd40e
DV
1273 /*
1274 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1275 * locking inversion with the driver load path. And the access here is
1276 * completely racy anyway. So don't bother with locking for now.
1277 */
1278 return dev->open_count == 0;
6a9ee8af
DA
1279}
1280
26ec685f
TI
/* vga_switcheroo client callbacks; registered in radeon_device_init() */
static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
	.set_gpu_state = radeon_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = radeon_switcheroo_can_switch,
};
6a9ee8af 1286
0c195119
AD
/**
 * radeon_device_init - initialize the driver
 *
 * @rdev: radeon_device pointer
 * @ddev: drm dev pointer
 * @pdev: pci dev pointer
 * @flags: driver flags
 *
 * Initializes the driver info and hw (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver startup.
 */
int radeon_device_init(struct radeon_device *rdev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r, i;
	int dma_bits;
	bool runtime = false;

	rdev->shutdown = false;
	rdev->dev = &pdev->dev;
	rdev->ddev = ddev;
	rdev->pdev = pdev;
	rdev->flags = flags;
	rdev->family = flags & RADEON_FAMILY_MASK;
	rdev->is_atom_bios = false;
	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
	/* placeholder; radeon_check_arguments() sets the real GTT size below */
	rdev->mc.gtt_size = 512 * 1024 * 1024;
	rdev->accel_working = false;
	/* set up ring ids */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		rdev->ring[i].idx = i;
	}
	rdev->fence_context = fence_context_alloc(RADEON_NUM_RINGS);

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
		 radeon_family_name[rdev->family], pdev->vendor, pdev->device,
		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);

	/* mutex initialization are all done here so we
	 * can recall function without having locking issues */
	mutex_init(&rdev->ring_lock);
	mutex_init(&rdev->dc_hw_i2c_mutex);
	atomic_set(&rdev->ih.lock, 0);
	mutex_init(&rdev->gem.mutex);
	mutex_init(&rdev->pm.mutex);
	mutex_init(&rdev->gpu_clock_mutex);
	mutex_init(&rdev->srbm_mutex);
	mutex_init(&rdev->grbm_idx_mutex);
	init_rwsem(&rdev->pm.mclk_lock);
	init_rwsem(&rdev->exclusive_lock);
	init_waitqueue_head(&rdev->irq.vblank_queue);
	mutex_init(&rdev->mn_lock);
	hash_init(rdev->mn_hash);
	r = radeon_gem_init(rdev);
	if (r)
		return r;

	radeon_check_arguments(rdev);
	/* Adjust VM size here.
	 * Max GPUVM size for cayman+ is 40 bits.
	 */
	rdev->vm_manager.max_pfn = radeon_vm_size << 18;

	/* Set asic functions */
	r = radeon_asic_init(rdev);
	if (r)
		return r;

	/* all of the newer IGP chips have an internal gart
	 * However some rs4xx report as AGP, so remove that here.
	 */
	if ((rdev->family >= CHIP_RS400) &&
	    (rdev->flags & RADEON_IS_IGP)) {
		rdev->flags &= ~RADEON_IS_AGP;
	}

	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
		radeon_agp_disable(rdev);
	}

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	if (rdev->family >= CHIP_CAYMAN)
		rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
	else if (rdev->family >= CHIP_CEDAR)
		rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
	else
		rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits
	 * AGP - generally dma32 is safest
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
	 */
	rdev->need_dma32 = false;
	if (rdev->flags & RADEON_IS_AGP)
		rdev->need_dma32 = true;
	if ((rdev->flags & RADEON_IS_PCI) &&
	    (rdev->family <= CHIP_RS740))
		rdev->need_dma32 = true;

	dma_bits = rdev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		/* fall back to 32 bits if the wider mask is rejected */
		rdev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "radeon: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "radeon: No coherent DMA available.\n");
	}

	/* Registers mapping */
	/* TODO: block userspace mapping of io register */
	spin_lock_init(&rdev->mmio_idx_lock);
	spin_lock_init(&rdev->smc_idx_lock);
	spin_lock_init(&rdev->pll_idx_lock);
	spin_lock_init(&rdev->mc_idx_lock);
	spin_lock_init(&rdev->pcie_idx_lock);
	spin_lock_init(&rdev->pciep_idx_lock);
	spin_lock_init(&rdev->pif_idx_lock);
	spin_lock_init(&rdev->cg_idx_lock);
	spin_lock_init(&rdev->uvd_idx_lock);
	spin_lock_init(&rdev->rcu_idx_lock);
	spin_lock_init(&rdev->didt_idx_lock);
	spin_lock_init(&rdev->end_idx_lock);
	/* the MMIO register BAR moved from BAR 2 to BAR 5 on CIK+ */
	if (rdev->family >= CHIP_BONAIRE) {
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
	} else {
		rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
		rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
	}
	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
	if (rdev->rmmio == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
	DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);

	/* doorbell bar mapping */
	if (rdev->family >= CHIP_BONAIRE)
		radeon_doorbell_init(rdev);

	/* io port mapping: pick the first I/O BAR found */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
			break;
		}
	}
	if (rdev->rio_mem == NULL)
		DRM_ERROR("Unable to find PCI I/O BAR\n");

	if (rdev->flags & RADEON_IS_PX)
		radeon_device_handle_px_quirks(rdev);

	/* if we have > 1 VGA cards, then disable the radeon VGA resources */
	/* this will fail for cards that aren't VGA class devices, just
	 * ignore it */
	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);

	if (rdev->flags & RADEON_IS_PX)
		runtime = true;
	vga_switcheroo_register_client(rdev->pdev, &radeon_switcheroo_ops, runtime);
	if (runtime)
		vga_switcheroo_init_domain_pm_ops(rdev->dev, &rdev->vga_pm_domain);

	r = radeon_init(rdev);
	if (r)
		goto failed;

	r = radeon_gem_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("registering gem debugfs failed (%d).\n", r);
	}

	r = radeon_mst_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("registering mst debugfs failed (%d).\n", r);
	}

	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
		/* Acceleration not working on AGP card try again
		 * with fallback to PCI or PCIE GART
		 */
		radeon_asic_reset(rdev);
		radeon_fini(rdev);
		radeon_agp_disable(rdev);
		r = radeon_init(rdev);
		if (r)
			goto failed;
	}

	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	/*
	 * Turks/Thames GPU will freeze whole laptop if DPM is not restarted
	 * after the CP ring have chew one packet at least. Hence here we stop
	 * and restart DPM after the radeon_ib_ring_tests().
	 */
	if (rdev->pm.dpm_enabled &&
	    (rdev->pm.pm_method == PM_METHOD_DPM) &&
	    (rdev->family == CHIP_TURKS) &&
	    (rdev->flags & RADEON_IS_MOBILITY)) {
		mutex_lock(&rdev->pm.mutex);
		radeon_dpm_disable(rdev);
		radeon_dpm_enable(rdev);
		mutex_unlock(&rdev->pm.mutex);
	}

	/* optional self-tests / benchmarks controlled by module params */
	if ((radeon_testing & 1)) {
		if (rdev->accel_working)
			radeon_test_moves(rdev);
		else
			DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
	}
	if ((radeon_testing & 2)) {
		if (rdev->accel_working)
			radeon_test_syncing(rdev);
		else
			DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
	}
	if (radeon_benchmarking) {
		if (rdev->accel_working)
			radeon_benchmark(rdev, radeon_benchmarking);
		else
			DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
	}
	return 0;

failed:
	/* balance pm_runtime_get_sync() in radeon_driver_unload_kms() */
	if (radeon_is_px(ddev))
		pm_runtime_put_noidle(ddev->dev);
	if (runtime)
		vga_switcheroo_fini_domain_pm_ops(rdev->dev);
	return r;
}
1537
4d8bf9ae
CK
1538static void radeon_debugfs_remove_files(struct radeon_device *rdev);
1539
0c195119
AD
/**
 * radeon_device_fini - tear down the driver
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the driver info (all asics).
 * Called at driver shutdown.
 * Roughly undoes radeon_device_init() in reverse order.
 */
void radeon_device_fini(struct radeon_device *rdev)
{
	DRM_INFO("radeon: finishing device.\n");
	rdev->shutdown = true;
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);
	radeon_fini(rdev);
	vga_switcheroo_unregister_client(rdev->pdev);
	if (rdev->flags & RADEON_IS_PX)
		vga_switcheroo_fini_domain_pm_ops(rdev->dev);
	/* unregister by passing NULL callbacks/cookie */
	vga_client_register(rdev->pdev, NULL, NULL, NULL);
	if (rdev->rio_mem)
		pci_iounmap(rdev->pdev, rdev->rio_mem);
	rdev->rio_mem = NULL;
	iounmap(rdev->rmmio);
	rdev->rmmio = NULL;
	if (rdev->family >= CHIP_BONAIRE)
		radeon_doorbell_fini(rdev);
	radeon_debugfs_remove_files(rdev);
}
1568
1569
1570/*
1571 * Suspend & resume.
1572 */
0c195119
AD
/**
 * radeon_suspend_kms - initiate device suspend
 *
 * @dev: drm dev pointer
 * @suspend: true = fully power down the PCI device afterwards
 * @fbcon: true = suspend the fbdev console as well
 * @freeze: true = hibernation freeze path (resets evergreen+ instead of
 *          powering down)
 *
 * Puts the hw in the suspend state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver suspend.
 */
int radeon_suspend_kms(struct drm_device *dev, bool suspend,
		       bool fbcon, bool freeze)
{
	struct radeon_device *rdev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	int i, r;

	if (dev == NULL || dev->dev_private == NULL) {
		return -ENODEV;
	}

	rdev = dev->dev_private;

	/* already powered down via switcheroo; nothing to do */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	drm_kms_helper_poll_disable(dev);

	drm_modeset_lock_all(dev);
	/* turn off display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
	}
	drm_modeset_unlock_all(dev);

	/* unpin the front buffers and cursors */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->primary->fb);
		struct radeon_bo *robj;

		if (radeon_crtc->cursor_bo) {
			struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				radeon_bo_unpin(robj);
				radeon_bo_unreserve(robj);
			}
		}

		if (rfb == NULL || rfb->obj == NULL) {
			continue;
		}
		robj = gem_to_radeon_bo(rfb->obj);
		/* don't unpin kernel fb objects */
		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				radeon_bo_unpin(robj);
				radeon_bo_unreserve(robj);
			}
		}
	}
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);

	/* wait for gpu to finish processing current batch */
	for (i = 0; i < RADEON_NUM_RINGS; i++) {
		r = radeon_fence_wait_empty(rdev, i);
		if (r) {
			/* delay GPU reset to resume */
			radeon_fence_driver_force_completion(rdev, i);
		}
	}

	radeon_save_bios_scratch_regs(rdev);

	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);
	/* evict remaining vram memory */
	radeon_bo_evict_vram(rdev);

	radeon_agp_suspend(rdev);

	pci_save_state(dev->pdev);
	if (freeze && rdev->family >= CHIP_CEDAR) {
		/* hibernation: reset the asic instead of powering it down */
		rdev->asic->asic_reset(rdev, true);
		pci_restore_state(dev->pdev);
	} else if (suspend) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}

	if (fbcon) {
		console_lock();
		radeon_fbdev_set_suspend(rdev, 1);
		console_unlock();
	}
	return 0;
}
1675
0c195119
AD
/**
 * radeon_resume_kms - initiate device resume
 *
 * @dev: drm dev pointer
 * @resume: true = re-enable the PCI device (it was powered down)
 * @fbcon: true = resume the fbdev console as well
 *
 * Bring the hw back to operating state (all asics).
 * Returns 0 for success or an error on failure.
 * Called at driver resume.
 */
int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
{
	struct drm_connector *connector;
	struct radeon_device *rdev = dev->dev_private;
	struct drm_crtc *crtc;
	int r;

	/* switched off via switcheroo; nothing to resume */
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	if (fbcon) {
		console_lock();
	}
	if (resume) {
		pci_set_power_state(dev->pdev, PCI_D0);
		pci_restore_state(dev->pdev);
		if (pci_enable_device(dev->pdev)) {
			if (fbcon)
				console_unlock();
			return -1;
		}
	}
	/* resume AGP if in use */
	radeon_agp_resume(rdev);
	radeon_resume(rdev);

	r = radeon_ib_ring_tests(rdev);
	if (r)
		DRM_ERROR("ib ring test failed (%d).\n", r);

	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		/* do dpm late init */
		r = radeon_pm_late_init(rdev);
		if (r) {
			rdev->pm.dpm_enabled = false;
			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
		}
	} else {
		/* resume old pm late */
		radeon_pm_resume(rdev);
	}

	radeon_restore_bios_scratch_regs(rdev);

	/* pin cursors (they were unpinned on suspend) */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);

		if (radeon_crtc->cursor_bo) {
			struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				/* Only 27 bit offset for legacy cursor */
				r = radeon_bo_pin_restricted(robj,
							     RADEON_GEM_DOMAIN_VRAM,
							     ASIC_IS_AVIVO(rdev) ?
							     0 : 1 << 27,
							     &radeon_crtc->cursor_addr);
				if (r != 0)
					DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
				radeon_bo_unreserve(robj);
			}
		}
	}

	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);
		/* turn on the BL */
		if (rdev->mode_info.bl_encoder) {
			u8 bl_level = radeon_get_backlight_level(rdev,
								 rdev->mode_info.bl_encoder);
			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
						   bl_level);
		}
	}
	/* reset hpd state */
	radeon_hpd_init(rdev);
	/* blat the mode back in */
	if (fbcon) {
		drm_helper_resume_force_mode(dev);
		/* turn on display hw */
		drm_modeset_lock_all(dev);
		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
		}
		drm_modeset_unlock_all(dev);
	}

	drm_kms_helper_poll_enable(dev);

	/* set the power state here in case we are a PX system or headless */
	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
		radeon_pm_compute_clocks(rdev);

	if (fbcon) {
		radeon_fbdev_set_suspend(rdev, 0);
		console_unlock();
	}

	return 0;
}
1788
0c195119
AD
/**
 * radeon_gpu_reset - reset the asic
 *
 * @rdev: radeon device pointer
 *
 * Attempt the reset the GPU if it has hung (all asics).
 * Saves pending ring contents, resets the asic, resumes it and
 * replays the saved commands.
 * Returns 0 for success or an error on failure (-EAGAIN if the
 * post-reset IB tests fail and a retry makes sense).
 */
int radeon_gpu_reset(struct radeon_device *rdev)
{
	unsigned ring_sizes[RADEON_NUM_RINGS];
	uint32_t *ring_data[RADEON_NUM_RINGS];

	bool saved = false;

	int i, r;
	int resched;

	/* take the lock exclusively while the hardware is torn down */
	down_write(&rdev->exclusive_lock);

	if (!rdev->needs_reset) {
		up_write(&rdev->exclusive_lock);
		return 0;
	}

	atomic_inc(&rdev->gpu_reset_counter);

	radeon_save_bios_scratch_regs(rdev);
	/* block TTM */
	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);

	/* save unprocessed ring contents so they can be replayed after reset */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
						   &ring_data[i]);
		if (ring_sizes[i]) {
			saved = true;
			dev_info(rdev->dev, "Saved %d dwords of commands "
				 "on ring %d.\n", ring_sizes[i], i);
		}
	}

	r = radeon_asic_reset(rdev);
	if (!r) {
		dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
		radeon_resume(rdev);
	}

	radeon_restore_bios_scratch_regs(rdev);

	/* replay saved commands on success, otherwise complete fences and
	 * drop the backups */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!r && ring_data[i]) {
			radeon_ring_restore(rdev, &rdev->ring[i],
					    ring_sizes[i], ring_data[i]);
		} else {
			radeon_fence_driver_force_completion(rdev, i);
			kfree(ring_data[i]);
		}
	}

	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
		/* do dpm late init */
		r = radeon_pm_late_init(rdev);
		if (r) {
			rdev->pm.dpm_enabled = false;
			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
		}
	} else {
		/* resume old pm late */
		radeon_pm_resume(rdev);
	}

	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);
		/* turn on the BL */
		if (rdev->mode_info.bl_encoder) {
			u8 bl_level = radeon_get_backlight_level(rdev,
								 rdev->mode_info.bl_encoder);
			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
						   bl_level);
		}
	}
	/* reset hpd state */
	radeon_hpd_init(rdev);

	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);

	rdev->in_reset = true;
	rdev->needs_reset = false;

	/* keep readers out until the IB tests below are done */
	downgrade_write(&rdev->exclusive_lock);

	drm_helper_resume_force_mode(rdev->ddev);

	/* set the power state here in case we are a PX system or headless */
	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
		radeon_pm_compute_clocks(rdev);

	if (!r) {
		r = radeon_ib_ring_tests(rdev);
		if (r && saved)
			r = -EAGAIN;
	} else {
		/* bad news, how to tell it to userspace ? */
		dev_info(rdev->dev, "GPU reset failed\n");
	}

	rdev->needs_reset = r == -EAGAIN;
	rdev->in_reset = false;

	up_read(&rdev->exclusive_lock);
	return r;
}
1905
771fe6b9
JG
1906
1907/*
1908 * Debugfs
1909 */
771fe6b9
JG
/**
 * radeon_debugfs_add_files - register a set of debugfs files
 *
 * @rdev: radeon_device pointer
 * @files: array of drm_info_list entries to register
 * @nfiles: number of entries in @files
 *
 * Records @files in the device's debugfs registry (so they can be
 * removed again at teardown) and creates them on both the control and
 * primary DRM minors. Registering the same @files array twice is a
 * no-op. Returns 0 on success, -EINVAL if the registry is full.
 */
int radeon_debugfs_add_files(struct radeon_device *rdev,
			     struct drm_info_list *files,
			     unsigned nfiles)
{
	unsigned i;

	for (i = 0; i < rdev->debugfs_count; i++) {
		if (rdev->debugfs[i].files == files) {
			/* Already registered */
			return 0;
		}
	}

	/* i becomes the new component count after insertion */
	i = rdev->debugfs_count + 1;
	if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
		DRM_ERROR("Reached maximum number of debugfs components.\n");
		DRM_ERROR("Report so we increase "
			  "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
		return -EINVAL;
	}
	rdev->debugfs[rdev->debugfs_count].files = files;
	rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
	rdev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
	drm_debugfs_create_files(files, nfiles,
				 rdev->ddev->control->debugfs_root,
				 rdev->ddev->control);
	drm_debugfs_create_files(files, nfiles,
				 rdev->ddev->primary->debugfs_root,
				 rdev->ddev->primary);
#endif
	return 0;
}
1943
4d8bf9ae
CK
/**
 * radeon_debugfs_remove_files - unregister all recorded debugfs files
 *
 * @rdev: radeon_device pointer
 *
 * Removes every file set previously registered through
 * radeon_debugfs_add_files() from both DRM minors.
 * Called from radeon_device_fini().
 */
static void radeon_debugfs_remove_files(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < rdev->debugfs_count; i++) {
		drm_debugfs_remove_files(rdev->debugfs[i].files,
					 rdev->debugfs[i].num_files,
					 rdev->ddev->control);
		drm_debugfs_remove_files(rdev->debugfs[i].files,
					 rdev->debugfs[i].num_files,
					 rdev->ddev->primary);
	}
#endif
}
1959
771fe6b9
JG
#if defined(CONFIG_DEBUG_FS)
/*
 * Per-minor debugfs hooks required by the DRM core. Radeon registers
 * its debugfs files dynamically via radeon_debugfs_add_files(), so
 * these callbacks are intentionally empty.
 */
int radeon_debugfs_init(struct drm_minor *minor)
{
	return 0;
}

void radeon_debugfs_cleanup(struct drm_minor *minor)
{
}
#endif
This page took 0.729315 seconds and 5 git commands to generate.