/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/console.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include <linux/vgaarb.h>
#include <linux/vga_switcheroo.h>
#include <linux/efi.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

static const char radeon_family_name[][16] = {
	"R100",
	"RV100",
	"RS100",
	"RV200",
	"RS200",
	"R200",
	"RV250",
	"RS300",
	"RV280",
	"R300",
	"R350",
	"RV350",
	"RV380",
	"R420",
	"R423",
	"RV410",
	"RS400",
	"RS480",
	"RS600",
	"RS690",
	"RS740",
	"RV515",
	"R520",
	"RV530",
	"RV560",
	"RV570",
	"R580",
	"R600",
	"RV610",
	"RV630",
	"RV670",
	"RV620",
	"RV635",
	"RS780",
	"RS880",
	"RV770",
	"RV730",
	"RV710",
	"RV740",
	"CEDAR",
	"REDWOOD",
	"JUNIPER",
	"CYPRESS",
	"HEMLOCK",
	"PALM",
	"SUMO",
	"SUMO2",
	"BARTS",
	"TURKS",
	"CAICOS",
	"CAYMAN",
	"ARUBA",
	"TAHITI",
	"PITCAIRN",
	"VERDE",
	"LAST",
};

/*
 * Clear GPU surface registers.
 */
void radeon_surface_init(struct radeon_device *rdev)
{
	/* FIXME: check this out */
	if (rdev->family < CHIP_R600) {
		int i;

		for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
			if (rdev->surface_regs[i].bo)
				radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
			else
				radeon_clear_surface_reg(rdev, i);
		}
		/* enable surfaces */
		WREG32(RADEON_SURFACE_CNTL, 0);
	}
}

/*
 * GPU scratch register helper functions.
 */
void radeon_scratch_init(struct radeon_device *rdev)
{
	int i;

	/* FIXME: check this out */
	if (rdev->family < CHIP_R300) {
		rdev->scratch.num_reg = 5;
	} else {
		rdev->scratch.num_reg = 7;
	}
	rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
	for (i = 0; i < rdev->scratch.num_reg; i++) {
		rdev->scratch.free[i] = true;
		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
	}
}

int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
{
	int i;

	for (i = 0; i < rdev->scratch.num_reg; i++) {
		if (rdev->scratch.free[i]) {
			rdev->scratch.free[i] = false;
			*reg = rdev->scratch.reg[i];
			return 0;
		}
	}
	return -EINVAL;
}

void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
{
	int i;

	for (i = 0; i < rdev->scratch.num_reg; i++) {
		if (rdev->scratch.reg[i] == reg) {
			rdev->scratch.free[i] = true;
			return;
		}
	}
}

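/*
 * Illustrative usage sketch (not from the original source): a caller
 * borrows a scratch register, has the GPU write a token into it, then
 * returns it to the pool:
 *
 *	uint32_t scratch;
 *
 *	if (radeon_scratch_get(rdev, &scratch) == 0) {
 *		... emit a ring command that writes to scratch ...
 *		radeon_scratch_free(rdev, scratch);
 *	}
 */
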
void radeon_wb_disable(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj) {
		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
		if (unlikely(r != 0))
			return;
		radeon_bo_kunmap(rdev->wb.wb_obj);
		radeon_bo_unpin(rdev->wb.wb_obj);
		radeon_bo_unreserve(rdev->wb.wb_obj);
	}
	rdev->wb.enabled = false;
}

void radeon_wb_fini(struct radeon_device *rdev)
{
	radeon_wb_disable(rdev);
	if (rdev->wb.wb_obj) {
		radeon_bo_unref(&rdev->wb.wb_obj);
		rdev->wb.wb = NULL;
		rdev->wb.wb_obj = NULL;
	}
}

int radeon_wb_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj == NULL) {
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_GTT, &rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
			return r;
		}
	}
	r = radeon_bo_reserve(rdev->wb.wb_obj, false);
	if (unlikely(r != 0)) {
		radeon_wb_fini(rdev);
		return r;
	}
	r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
			  &rdev->wb.gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->wb.wb_obj);
		dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
		radeon_wb_fini(rdev);
		return r;
	}
	r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
	radeon_bo_unreserve(rdev->wb.wb_obj);
	if (r) {
		dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
		radeon_wb_fini(rdev);
		return r;
	}

	/* clear wb memory */
	memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
	/* disable event_write fences */
	rdev->wb.use_event = false;
	/* disabled via module param */
	if (radeon_no_wb == 1)
		rdev->wb.enabled = false;
	else {
		if (rdev->flags & RADEON_IS_AGP) {
			/* often unreliable on AGP */
			rdev->wb.enabled = false;
		} else if (rdev->family < CHIP_R300) {
			/* often unreliable on pre-r300 */
			rdev->wb.enabled = false;
		} else {
			rdev->wb.enabled = true;
			/* event_write fences are only available on r600+ */
			if (rdev->family >= CHIP_R600)
				rdev->wb.use_event = true;
		}
	}
	/* always use writeback/events on NI, APUs */
	if (rdev->family >= CHIP_PALM) {
		rdev->wb.enabled = true;
		rdev->wb.use_event = true;
	}

	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");

	return 0;
}

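/*
 * Background note (added for clarity, not from the original source): with
 * writeback enabled the GPU DMAs status data such as fence sequence
 * numbers and ring read pointers into this page, so the driver can read
 * them from cacheable system memory instead of doing an MMIO register
 * read, e.g.:
 *
 *	seq = le32_to_cpu(rdev->wb.wb[index / 4]);
 *
 * where 'index' is a byte offset handed out by the ring/fence code (the
 * exact accessor varies by ASIC; this is an illustrative sketch).
 */
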
/**
 * radeon_vram_location - try to find VRAM location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 * @base: base address at which to put VRAM
 *
 * Try to place VRAM at the base address provided as parameter (which is
 * so far either the PCI aperture address or, for IGP, the TOM base
 * address).
 *
 * If there is not enough space to fit the invisible VRAM in the 32-bit
 * address space then we limit the VRAM size to the aperture.
 *
 * If we are using AGP and the AGP aperture doesn't allow us to have room
 * for all the VRAM then we restrict the VRAM to the PCI aperture size and
 * print a warning.
 *
 * This function never fails; in the worst case we limit VRAM.
 *
 * Note: GTT start, end, and size should be initialized before calling this
 * function on AGP platforms.
 *
 * Note: we don't explicitly enforce VRAM start to be aligned on VRAM size;
 * this shouldn't be a problem as we are using the PCI aperture as a
 * reference. Otherwise this would be needed for rv280, all r3xx, and all
 * r4xx, but not IGP.
 *
 * Note: we use mc_vram_size because on some boards we need to program the
 * mc to cover the whole aperture even if VRAM size is smaller than the
 * aperture size (Novell bug 204882, along with lots of Ubuntu ones).
 *
 * Note: when limiting vram it's safe to overwrite real_vram_size because
 * we are not in the case where real_vram_size is smaller than mc_vram_size
 * (i.e. not affected by the bogus hardware of Novell bug 204882 and the
 * various Ubuntu ones).
 *
 * Note: IGP TOM addr should be the same as the aperture addr; we don't
 * explicitly check for that though.
 *
 * FIXME: when reducing VRAM size align new size on power of 2.
 */
void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
{
	mc->vram_start = base;
	if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	if (radeon_vram_limit && radeon_vram_limit < mc->real_vram_size)
		mc->real_vram_size = radeon_vram_limit;
	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
			mc->mc_vram_size >> 20, mc->vram_start,
			mc->vram_end, mc->real_vram_size >> 20);
}
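
/*
 * Worked example (illustrative numbers, not from the original source):
 * with base = 0 and mc_vram_size = 1 GiB, the VRAM range becomes
 * [0x00000000, 0x3FFFFFFF]. With base = 0xE0000000 and mc_vram_size =
 * 1 GiB, the size exceeds the remaining 32-bit space (0x20000000 bytes),
 * so both mc_vram_size and real_vram_size are clamped to aper_size.
 */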

/**
 * radeon_gtt_location - try to find GTT location
 * @rdev: radeon device structure holding all necessary information
 * @mc: memory controller structure holding memory information
 *
 * Try to place GTT before or after VRAM.
 *
 * If the GTT size is bigger than the space left then we adjust the GTT
 * size. Thus this function never fails.
 *
 * FIXME: when reducing GTT size align new size on power of 2.
 */
void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_af, size_bf;

	size_af = ((0xFFFFFFFF - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		if (mc->gtt_size > size_bf) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
	} else {
		if (mc->gtt_size > size_af) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}
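
/*
 * Alignment example (illustrative values, not from the original source;
 * gtt_base_align is a mask of alignment-minus-one): with gtt_base_align =
 * 0xFFFF (64 KiB alignment) and vram_end = 0x0FFFFFFF, the space after
 * VRAM is (0xFFFFFFFF - 0x0FFFFFFF + 0xFFFF) & ~0xFFFF = 0xF0000000
 * bytes, and gtt_start = (0x0FFFFFFF + 1 + 0xFFFF) & ~0xFFFF =
 * 0x10000000, i.e. the first 64 KiB-aligned address past VRAM.
 */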

/*
 * GPU helper functions.
 */
bool radeon_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	if (efi_enabled && rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE)
		return false;

	/* first check CRTCs */
	if (ASIC_IS_DCE41(rdev)) {
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_DCE4(rdev)) {
		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
		if (reg & EVERGREEN_CRTC_MASTER_EN)
			return true;
	} else if (ASIC_IS_AVIVO(rdev)) {
		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
			RREG32(AVIVO_D2CRTC_CONTROL);
		if (reg & AVIVO_CRTC_EN) {
			return true;
		}
	} else {
		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
			RREG32(RADEON_CRTC2_GEN_CNTL);
		if (reg & RADEON_CRTC_EN) {
			return true;
		}
	}

	/* then check MEM_SIZE, in case the crtcs are off */
	if (rdev->family >= CHIP_R600)
		reg = RREG32(R600_CONFIG_MEMSIZE);
	else
		reg = RREG32(RADEON_CONFIG_MEMSIZE);

	if (reg)
		return true;

	return false;
}

void radeon_update_bandwidth_info(struct radeon_device *rdev)
{
	fixed20_12 a;
	u32 sclk = rdev->pm.current_sclk;
	u32 mclk = rdev->pm.current_mclk;

	/* sclk/mclk in MHz */
	a.full = dfixed_const(100);
	rdev->pm.sclk.full = dfixed_const(sclk);
	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
	rdev->pm.mclk.full = dfixed_const(mclk);
	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);

	if (rdev->flags & RADEON_IS_IGP) {
		a.full = dfixed_const(16);
		/* core_bandwidth = sclk(MHz) * 16 */
		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
	}
}

bool radeon_boot_test_post_card(struct radeon_device *rdev)
{
	if (radeon_card_posted(rdev))
		return true;

	if (rdev->bios) {
		DRM_INFO("GPU not posted. posting now...\n");
		if (rdev->is_atom_bios)
			atom_asic_init(rdev->mode_info.atom_context);
		else
			radeon_combios_asic_init(rdev->ddev);
		return true;
	} else {
		dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
		return false;
	}
}

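/*
 * Note (added for clarity, not from the original source): the dummy page
 * is a single zeroed system page used as a safe backing target, e.g. for
 * GART entries that are not bound to a real buffer, so stray GPU accesses
 * cannot scribble over arbitrary memory.
 */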
int radeon_dummy_page_init(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page)
		return 0;
	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
	if (rdev->dummy_page.page == NULL)
		return -ENOMEM;
	rdev->dummy_page.addr = pci_map_page(rdev->pdev, rdev->dummy_page.page,
					0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(rdev->pdev, rdev->dummy_page.addr)) {
		dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
		__free_page(rdev->dummy_page.page);
		rdev->dummy_page.page = NULL;
		return -ENOMEM;
	}
	return 0;
}

void radeon_dummy_page_fini(struct radeon_device *rdev)
{
	if (rdev->dummy_page.page == NULL)
		return;
	pci_unmap_page(rdev->pdev, rdev->dummy_page.addr,
			PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(rdev->dummy_page.page);
	rdev->dummy_page.page = NULL;
}

/* ATOM accessor methods */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = rdev->pll_rreg(rdev, reg);
	return r;
}

static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->pll_wreg(rdev, reg, val);
}

static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = rdev->mc_rreg(rdev, reg);
	return r;
}

static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	rdev->mc_wreg(rdev, reg, val);
}

static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32(reg*4, val);
}

static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = RREG32(reg*4);
	return r;
}

static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct radeon_device *rdev = info->dev->dev_private;

	WREG32_IO(reg*4, val);
}

static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
{
	struct radeon_device *rdev = info->dev->dev_private;
	uint32_t r;

	r = RREG32_IO(reg*4);
	return r;
}

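/*
 * Note (added for clarity, not from the original source): the AtomBIOS
 * interpreter addresses MMIO registers by 32-bit dword index, while the
 * WREG32/RREG32 helpers take byte offsets, hence the reg*4 scaling in the
 * callbacks above.
 */
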
int radeon_atombios_init(struct radeon_device *rdev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	rdev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = rdev->ddev;
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	/* needed for iio ops */
	if (rdev->rio_mem) {
		atom_card_info->ioreg_read = cail_ioreg_read;
		atom_card_info->ioreg_write = cail_ioreg_write;
	} else {
		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
		atom_card_info->ioreg_read = cail_reg_read;
		atom_card_info->ioreg_write = cail_reg_write;
	}
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
	mutex_init(&rdev->mode_info.atom_context->mutex);
	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
	return 0;
}

void radeon_atombios_fini(struct radeon_device *rdev)
{
	if (rdev->mode_info.atom_context) {
		kfree(rdev->mode_info.atom_context->scratch);
		kfree(rdev->mode_info.atom_context);
	}
	kfree(rdev->mode_info.atom_card_info);
}

int radeon_combios_init(struct radeon_device *rdev)
{
	radeon_combios_initialize_bios_scratch_regs(rdev->ddev);
	return 0;
}

void radeon_combios_fini(struct radeon_device *rdev)
{
}

/* if we get transitioned to only one device, take VGA back */
static unsigned int radeon_vga_set_decode(void *cookie, bool state)
{
	struct radeon_device *rdev = cookie;
	radeon_vga_set_state(rdev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

void radeon_check_arguments(struct radeon_device *rdev)
{
	/* vramlimit must be a power of two */
	switch (radeon_vram_limit) {
	case 0:
	case 4:
	case 8:
	case 16:
	case 32:
	case 64:
	case 128:
	case 256:
	case 512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
				radeon_vram_limit);
		radeon_vram_limit = 0;
		break;
	}
	radeon_vram_limit = radeon_vram_limit << 20;
	/* gtt size must be a power of two and greater than or equal to 32M */
	switch (radeon_gart_size) {
	case 4:
	case 8:
	case 16:
		dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
				radeon_gart_size);
		radeon_gart_size = 512;
		break;
	case 32:
	case 64:
	case 128:
	case 256:
	case 512:
	case 1024:
	case 2048:
	case 4096:
		break;
	default:
		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
				radeon_gart_size);
		radeon_gart_size = 512;
		break;
	}
	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
	/* AGP mode can only be -1, 0, 1, 2, 4, 8 */
	switch (radeon_agpmode) {
	case -1:
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
				"-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
		radeon_agpmode = 0;
		break;
	}
}
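
/*
 * For illustration only (not part of the original file): the power-of-two
 * checks above could also be written with is_power_of_2() from
 * <linux/log2.h>, though the explicit switch additionally bounds the
 * accepted range, e.g.:
 *
 *	if (radeon_vram_limit && !is_power_of_2(radeon_vram_limit)) {
 *		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
 *			 radeon_vram_limit);
 *		radeon_vram_limit = 0;
 *	}
 */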

static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
	if (state == VGA_SWITCHEROO_ON) {
		printk(KERN_INFO "radeon: switched on\n");
		/* don't suspend or resume card normally */
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		radeon_resume_kms(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
		drm_kms_helper_poll_enable(dev);
	} else {
		printk(KERN_INFO "radeon: switched off\n");
		drm_kms_helper_poll_disable(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		radeon_suspend_kms(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	bool can_switch;

	spin_lock(&dev->count_lock);
	can_switch = (dev->open_count == 0);
	spin_unlock(&dev->count_lock);
	return can_switch;
}

int radeon_device_init(struct radeon_device *rdev,
		       struct drm_device *ddev,
		       struct pci_dev *pdev,
		       uint32_t flags)
{
	int r, i;
	int dma_bits;

	rdev->shutdown = false;
	rdev->dev = &pdev->dev;
	rdev->ddev = ddev;
	rdev->pdev = pdev;
	rdev->flags = flags;
	rdev->family = flags & RADEON_FAMILY_MASK;
	rdev->is_atom_bios = false;
	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
	rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
	rdev->gpu_lockup = false;
	rdev->accel_working = false;

	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X).\n",
		radeon_family_name[rdev->family], pdev->vendor, pdev->device,
		pdev->subsystem_vendor, pdev->subsystem_device);

	/* mutex initialization is all done here so we
	 * can call these functions again without locking issues */
	radeon_mutex_init(&rdev->cs_mutex);
	radeon_mutex_init(&rdev->ib_pool.mutex);
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		mutex_init(&rdev->ring[i].mutex);
	mutex_init(&rdev->dc_hw_i2c_mutex);
	if (rdev->family >= CHIP_R600)
		spin_lock_init(&rdev->ih.lock);
	mutex_init(&rdev->gem.mutex);
	mutex_init(&rdev->pm.mutex);
	mutex_init(&rdev->vram_mutex);
	rwlock_init(&rdev->fence_lock);
	rwlock_init(&rdev->semaphore_drv.lock);
	INIT_LIST_HEAD(&rdev->gem.objects);
	init_waitqueue_head(&rdev->irq.vblank_queue);
	init_waitqueue_head(&rdev->irq.idle_queue);
	INIT_LIST_HEAD(&rdev->semaphore_drv.bo);
	/* initialize vm here */
	rdev->vm_manager.use_bitmap = 1;
	rdev->vm_manager.max_pfn = 1 << 20;
	INIT_LIST_HEAD(&rdev->vm_manager.lru_vm);

	/* Set asic functions */
	r = radeon_asic_init(rdev);
	if (r)
		return r;
	radeon_check_arguments(rdev);

	/* All of the newer IGP chips have an internal gart.
	 * However some rs4xx report as AGP, so remove that here.
	 */
	if ((rdev->family >= CHIP_RS400) &&
	    (rdev->flags & RADEON_IS_IGP)) {
		rdev->flags &= ~RADEON_IS_AGP;
	}

	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
		radeon_agp_disable(rdev);
	}

	/* set DMA mask + need_dma32 flags.
	 * PCIE - can handle 40-bits.
	 * IGP - can handle 40-bits
	 * AGP - generally dma32 is safest
	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
	 */
	rdev->need_dma32 = false;
	if (rdev->flags & RADEON_IS_AGP)
		rdev->need_dma32 = true;
	if ((rdev->flags & RADEON_IS_PCI) &&
	    (rdev->family < CHIP_RS400))
		rdev->need_dma32 = true;

	dma_bits = rdev->need_dma32 ? 32 : 40;
	r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		rdev->need_dma32 = true;
		dma_bits = 32;
		printk(KERN_WARNING "radeon: No suitable DMA available.\n");
	}
	r = pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
	if (r) {
		pci_set_consistent_dma_mask(rdev->pdev, DMA_BIT_MASK(32));
		printk(KERN_WARNING "radeon: No coherent DMA available.\n");
	}

	/* Register mapping */
	/* TODO: block userspace mapping of io register */
	rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
	rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
	if (rdev->rmmio == NULL) {
		return -ENOMEM;
	}
	DRM_INFO("register mmio base: 0x%08X\n", (uint32_t)rdev->rmmio_base);
	DRM_INFO("register mmio size: %u\n", (unsigned)rdev->rmmio_size);

	/* io port mapping */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
			break;
		}
	}
	if (rdev->rio_mem == NULL)
		DRM_ERROR("Unable to find PCI I/O BAR\n");

	/* if we have more than one VGA card, then disable the radeon VGA resources */
	/* this will fail for cards that aren't VGA class devices, just
	 * ignore it */
	vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
	vga_switcheroo_register_client(rdev->pdev,
				       radeon_switcheroo_set_state,
				       NULL,
				       radeon_switcheroo_can_switch);

	r = radeon_init(rdev);
	if (r)
		return r;

	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
		/* Acceleration not working on AGP card try again
		 * with fallback to PCI or PCIE GART
		 */
		radeon_asic_reset(rdev);
		radeon_fini(rdev);
		radeon_agp_disable(rdev);
		r = radeon_init(rdev);
		if (r)
			return r;
	}
	if ((radeon_testing & 1)) {
		radeon_test_moves(rdev);
	}
	if ((radeon_testing & 2)) {
		radeon_test_syncing(rdev);
	}
	if (radeon_benchmarking) {
		radeon_benchmark(rdev, radeon_benchmarking);
	}
	return 0;
}

static void radeon_debugfs_remove_files(struct radeon_device *rdev);

void radeon_device_fini(struct radeon_device *rdev)
{
	DRM_INFO("radeon: finishing device.\n");
	rdev->shutdown = true;
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);
	radeon_fini(rdev);
	vga_switcheroo_unregister_client(rdev->pdev);
	vga_client_register(rdev->pdev, NULL, NULL, NULL);
	if (rdev->rio_mem)
		pci_iounmap(rdev->pdev, rdev->rio_mem);
	rdev->rio_mem = NULL;
	iounmap(rdev->rmmio);
	rdev->rmmio = NULL;
	radeon_debugfs_remove_files(rdev);
}

/*
 * Suspend & resume.
 */
int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
{
	struct radeon_device *rdev;
	struct drm_crtc *crtc;
	struct drm_connector *connector;
	int i, r;

	if (dev == NULL || dev->dev_private == NULL) {
		return -ENODEV;
	}
	if (state.event == PM_EVENT_PRETHAW) {
		return 0;
	}
	rdev = dev->dev_private;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	drm_kms_helper_poll_disable(dev);

	/* turn off display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
	}

	/* unpin the front buffers */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
		struct radeon_bo *robj;

		if (rfb == NULL || rfb->obj == NULL) {
			continue;
		}
		robj = gem_to_radeon_bo(rfb->obj);
		/* don't unpin kernel fb objects */
		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
			r = radeon_bo_reserve(robj, false);
			if (r == 0) {
				radeon_bo_unpin(robj);
				radeon_bo_unreserve(robj);
			}
		}
	}
	/* evict vram memory */
	radeon_bo_evict_vram(rdev);
	/* wait for gpu to finish processing current batch */
	for (i = 0; i < RADEON_NUM_RINGS; i++)
		radeon_fence_wait_last(rdev, i);

	radeon_save_bios_scratch_regs(rdev);

	radeon_pm_suspend(rdev);
	radeon_suspend(rdev);
	radeon_hpd_fini(rdev);
	/* evict remaining vram memory */
	radeon_bo_evict_vram(rdev);

	radeon_agp_suspend(rdev);

	pci_save_state(dev->pdev);
	if (state.event == PM_EVENT_SUSPEND) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}
	console_lock();
	radeon_fbdev_set_suspend(rdev, 1);
	console_unlock();
	return 0;
}

int radeon_resume_kms(struct drm_device *dev)
{
	struct drm_connector *connector;
	struct radeon_device *rdev = dev->dev_private;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	console_lock();
	pci_set_power_state(dev->pdev, PCI_D0);
	pci_restore_state(dev->pdev);
	if (pci_enable_device(dev->pdev)) {
		console_unlock();
		return -1;
	}
	pci_set_master(dev->pdev);
	/* resume AGP if in use */
	radeon_agp_resume(rdev);
	radeon_resume(rdev);
	radeon_pm_resume(rdev);
	radeon_restore_bios_scratch_regs(rdev);

	radeon_fbdev_set_suspend(rdev, 0);
	console_unlock();

	/* init dig PHYs, disp eng pll */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);
		radeon_atom_disp_eng_pll_init(rdev);
	}
	/* reset hpd state */
	radeon_hpd_init(rdev);
	/* blat the mode back in */
	drm_helper_resume_force_mode(dev);
	/* turn on display hw */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
	}

	drm_kms_helper_poll_enable(dev);
	return 0;
}

int radeon_gpu_reset(struct radeon_device *rdev)
{
	int r;
	int resched;

	/* Prevent CS ioctl from interfering */
	radeon_mutex_lock(&rdev->cs_mutex);

	radeon_save_bios_scratch_regs(rdev);
	/* block TTM */
	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
	radeon_suspend(rdev);

	r = radeon_asic_reset(rdev);
	if (!r) {
		dev_info(rdev->dev, "GPU reset succeeded\n");
		radeon_resume(rdev);
		radeon_restore_bios_scratch_regs(rdev);
		drm_helper_resume_force_mode(rdev->ddev);
		ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
	}

	radeon_mutex_unlock(&rdev->cs_mutex);

	if (r) {
		/* bad news, how do we tell userspace about it? */
		dev_info(rdev->dev, "GPU reset failed\n");
	}

	return r;
}

/*
 * Debugfs
 */
int radeon_debugfs_add_files(struct radeon_device *rdev,
			     struct drm_info_list *files,
			     unsigned nfiles)
{
	unsigned i;

	for (i = 0; i < rdev->debugfs_count; i++) {
		if (rdev->debugfs[i].files == files) {
			/* Already registered */
			return 0;
		}
	}

	i = rdev->debugfs_count + 1;
	if (i > RADEON_DEBUGFS_MAX_COMPONENTS) {
		DRM_ERROR("Reached maximum number of debugfs components.\n");
		DRM_ERROR("Report so we increase "
			  "RADEON_DEBUGFS_MAX_COMPONENTS.\n");
		return -EINVAL;
	}
	rdev->debugfs[rdev->debugfs_count].files = files;
	rdev->debugfs[rdev->debugfs_count].num_files = nfiles;
	rdev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
	drm_debugfs_create_files(files, nfiles,
				 rdev->ddev->control->debugfs_root,
				 rdev->ddev->control);
	drm_debugfs_create_files(files, nfiles,
				 rdev->ddev->primary->debugfs_root,
				 rdev->ddev->primary);
#endif
	return 0;
}

static void radeon_debugfs_remove_files(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < rdev->debugfs_count; i++) {
		drm_debugfs_remove_files(rdev->debugfs[i].files,
					 rdev->debugfs[i].num_files,
					 rdev->ddev->control);
		drm_debugfs_remove_files(rdev->debugfs[i].files,
					 rdev->debugfs[i].num_files,
					 rdev->ddev->primary);
	}
#endif
}

#if defined(CONFIG_DEBUG_FS)
int radeon_debugfs_init(struct drm_minor *minor)
{
	return 0;
}

void radeon_debugfs_cleanup(struct drm_minor *minor)
{
}
#endif