/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/async.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_legacy.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include <linux/pci.h>
#include <linux/console.h>
#include <linux/vt.h>
#include <linux/vgaarb.h>
#include <linux/acpi.h>
#include <linux/pnp.h>
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <acpi/video.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/oom.h>

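/*
 * i915_getparam() backs the DRM_IOCTL_I915_GETPARAM ioctl: userspace asks
 * for a single I915_PARAM_* capability and gets one integer back. A rough
 * sketch of the userspace side (hypothetical fd/variable names, assuming
 * libdrm's drmIoctl() wrapper):
 *
 *	int has_blt = 0;
 *	drm_i915_getparam_t gp = { .param = I915_PARAM_HAS_BLT,
 *				   .value = &has_blt };
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0 && has_blt)
 *		... BLT ring is available ...
 */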
static int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	drm_i915_getparam_t *param = data;
	int value;

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
	case I915_PARAM_ALLOW_BATCHBUFFER:
	case I915_PARAM_LAST_DISPATCH:
		/* Reject all old ums/dri params. */
		return -ENODEV;
	case I915_PARAM_CHIPSET_ID:
		value = dev->pdev->device;
		break;
	case I915_PARAM_REVISION:
		value = dev->pdev->revision;
		break;
	case I915_PARAM_HAS_GEM:
		value = 1;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
		break;
	case I915_PARAM_HAS_OVERLAY:
		value = dev_priv->overlay ? 1 : 0;
		break;
	case I915_PARAM_HAS_PAGEFLIPPING:
		value = 1;
		break;
	case I915_PARAM_HAS_EXECBUF2:
		/* depends on GEM */
		value = 1;
		break;
	case I915_PARAM_HAS_BSD:
		value = intel_ring_initialized(&dev_priv->ring[VCS]);
		break;
	case I915_PARAM_HAS_BLT:
		value = intel_ring_initialized(&dev_priv->ring[BCS]);
		break;
	case I915_PARAM_HAS_VEBOX:
		value = intel_ring_initialized(&dev_priv->ring[VECS]);
		break;
	case I915_PARAM_HAS_BSD2:
		value = intel_ring_initialized(&dev_priv->ring[VCS2]);
		break;
	case I915_PARAM_HAS_RELAXED_FENCING:
		value = 1;
		break;
	case I915_PARAM_HAS_COHERENT_RINGS:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_CONSTANTS:
		value = INTEL_INFO(dev)->gen >= 4;
		break;
	case I915_PARAM_HAS_RELAXED_DELTA:
		value = 1;
		break;
	case I915_PARAM_HAS_GEN7_SOL_RESET:
		value = 1;
		break;
	case I915_PARAM_HAS_LLC:
		value = HAS_LLC(dev);
		break;
	case I915_PARAM_HAS_WT:
		value = HAS_WT(dev);
		break;
	case I915_PARAM_HAS_ALIASING_PPGTT:
		value = USES_PPGTT(dev);
		break;
	case I915_PARAM_HAS_WAIT_TIMEOUT:
		value = 1;
		break;
	case I915_PARAM_HAS_SEMAPHORES:
		value = i915_semaphore_is_enabled(dev);
		break;
	case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
		value = 1;
		break;
	case I915_PARAM_HAS_SECURE_BATCHES:
		value = capable(CAP_SYS_ADMIN);
		break;
	case I915_PARAM_HAS_PINNED_BATCHES:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_NO_RELOC:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_HANDLE_LUT:
		value = 1;
		break;
	case I915_PARAM_CMD_PARSER_VERSION:
		value = i915_cmd_parser_get_version();
		break;
	case I915_PARAM_HAS_COHERENT_PHYS_GTT:
		value = 1;
		break;
	case I915_PARAM_MMAP_VERSION:
		value = 1;
		break;
	case I915_PARAM_SUBSLICE_TOTAL:
		value = INTEL_INFO(dev)->subslice_total;
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_EU_TOTAL:
		value = INTEL_INFO(dev)->eu_total;
		if (!value)
			return -ENODEV;
		break;
	case I915_PARAM_HAS_GPU_RESET:
		value = i915.enable_hangcheck &&
			intel_has_gpu_reset(dev);
		break;
	case I915_PARAM_HAS_RESOURCE_STREAMER:
		value = HAS_RESOURCE_STREAMER(dev);
		break;
	default:
		DRM_DEBUG("Unknown parameter %d\n", param->param);
		return -EINVAL;
	}

	if (copy_to_user(param->value, &value, sizeof(int))) {
		DRM_ERROR("copy_to_user failed\n");
		return -EFAULT;
	}

	return 0;
}

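/*
 * i915_setparam() is the write-side counterpart (DRM_IOCTL_I915_SETPARAM).
 * With all legacy UMS/DRI parameters rejected, the only remaining knob is
 * how many fence registers userspace may claim for itself.
 */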
static int i915_setparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	drm_i915_setparam_t *param = data;

	switch (param->param) {
	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
	case I915_SETPARAM_ALLOW_BATCHBUFFER:
		/* Reject all old ums/dri params. */
		return -ENODEV;

	case I915_SETPARAM_NUM_USED_FENCES:
		if (param->value > dev_priv->num_fence_regs ||
		    param->value < 0)
			return -EINVAL;
		/* Userspace can use first N regs */
		dev_priv->fence_reg_start = param->value;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown parameter %d\n",
				 param->param);
		return -EINVAL;
	}

	return 0;
}

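/*
 * The "bridge device" is the host bridge at bus 0, devfn 00.0; its PCI
 * config space carries the MCHBAR enable/address registers poked below.
 */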
static int i915_get_bridge_dev(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
	if (!dev_priv->bridge_dev) {
		DRM_ERROR("bridge device not found\n");
		return -1;
	}
	return 0;
}

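/*
 * MCHBAR is the Memory Controller Hub's MMIO window. Its base address lives
 * in the host bridge's config space: offset 0x44 before gen4, 0x48 from gen4
 * onwards, with a separate enable bit (in DEVEN on i915G/GM). The helpers
 * below allocate an address for it if the firmware didn't, and enable it so
 * later driver code can read memory controller state from it.
 */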
#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)

#define DEVEN_REG 0x54
#define DEVEN_MCHBAR_EN (1 << 28)

/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret;

	if (INTEL_INFO(dev)->gen >= 4)
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	dev_priv->mch_res.name = "i915 MCHBAR";
	dev_priv->mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
				     &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
		dev_priv->mch_res.start = 0;
		return ret;
	}

	if (INTEL_INFO(dev)->gen >= 4)
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
	return 0;
}

/* Setup MCHBAR if possible; mchbar_need_disable notes whether we should disable it again on teardown */
static void
intel_setup_mchbar(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	if (IS_VALLEYVIEW(dev))
		return;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}

static void
intel_teardown_mchbar(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev) || IS_I915GM(dev)) {
			pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
			temp &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
		} else {
			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
			temp &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}

/* true = enable decode, false = disable decode */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
	struct drm_device *dev = cookie;

	intel_modeset_vga_set_state(dev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}

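/*
 * vga_switcheroo callbacks for hybrid (dual-GPU) systems: the switcheroo
 * core tells us when the i915 GPU is being switched on or off, and we map
 * that onto the driver's ordinary suspend/resume paths.
 */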
static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("switched on\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		/* i915 resume handler doesn't set to D0 */
		pci_set_power_state(dev->pdev, PCI_D0);
		i915_resume_switcheroo(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		pr_info("switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		i915_suspend_switcheroo(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}

static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
	.set_gpu_state = i915_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = i915_switcheroo_can_switch,
};

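/*
 * One-time KMS bring-up: parse the VBT, register with the VGA arbiter and
 * vga_switcheroo, reserve stolen memory, install interrupts, initialize the
 * modeset core and GEM, and finally set up fbdev emulation. The cleanup
 * labels at the bottom unwind these steps in reverse order on failure.
 */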
static int i915_load_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_parse_bios(dev);
	if (ret)
		DRM_INFO("failed to find VBIOS tables\n");

	/* If we have > 1 VGA cards, then we need to arbitrate access
	 * to the common VGA resources.
	 *
	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
	 * then we do not take part in VGA arbitration and the
	 * vga_client_register() fails with -ENODEV.
	 */
	ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
	if (ret && ret != -ENODEV)
		goto out;

	intel_register_dsm_handler();

	ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false);
	if (ret)
		goto cleanup_vga_client;

	/* Initialise stolen first so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	ret = i915_gem_init_stolen(dev);
	if (ret)
		goto cleanup_vga_switcheroo;

	intel_power_domains_init_hw(dev_priv);

	ret = intel_irq_install(dev_priv);
	if (ret)
		goto cleanup_gem_stolen;

	/* Important: The output setup functions called by modeset_init need
	 * working irqs for e.g. gmbus and dp aux transfers. */
	intel_modeset_init(dev);

	/* intel_guc_ucode_init() needs the mutex to allocate GEM objects */
	mutex_lock(&dev->struct_mutex);
	intel_guc_ucode_init(dev);
	mutex_unlock(&dev->struct_mutex);

	ret = i915_gem_init(dev);
	if (ret)
		goto cleanup_irq;

	intel_modeset_gem_init(dev);

	/* Always safe in the mode setting case. */
	/* FIXME: do pre/post-mode set stuff in core KMS code */
	dev->vblank_disable_allowed = true;
	if (INTEL_INFO(dev)->num_pipes == 0)
		return 0;

	ret = intel_fbdev_init(dev);
	if (ret)
		goto cleanup_gem;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(dev_priv);

	/*
	 * Some ports require correctly set-up hpd registers for detection to
	 * work properly (leading to ghost connected connector status), e.g. VGA
	 * on gm45. Hence we can only set up the initial fbdev config after hpd
	 * irqs are fully enabled. Now we should scan for the initial config
	 * only once hotplug handling is enabled, but due to screwed-up locking
	 * around kms/fbdev init we can't protect the fbdev initial config
	 * scanning against hotplug events. Hence do this first and ignore the
	 * tiny window where we will lose hotplug notifications.
	 */
	async_schedule(intel_fbdev_initial_config, dev_priv);

	drm_kms_helper_poll_init(dev);

	return 0;

cleanup_gem:
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	i915_gem_context_fini(dev);
	mutex_unlock(&dev->struct_mutex);
cleanup_irq:
	mutex_lock(&dev->struct_mutex);
	intel_guc_ucode_fini(dev);
	mutex_unlock(&dev->struct_mutex);
	drm_irq_uninstall(dev);
cleanup_gem_stolen:
	i915_gem_cleanup_stolen(dev);
cleanup_vga_switcheroo:
	vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
	vga_client_register(dev->pdev, NULL, NULL, NULL);
out:
	return ret;
}

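/*
 * Firmware framebuffer drivers (efifb, vesafb, ...) may already own the
 * aperture we are about to take over; kick them out of our GTT-mappable
 * range first so the fbdev and vgacon hand-off happens in a defined order.
 */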
#if IS_ENABLED(CONFIG_FB)
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	struct apertures_struct *ap;
	struct pci_dev *pdev = dev_priv->dev->pdev;
	bool primary;
	int ret;

	ap = alloc_apertures(1);
	if (!ap)
		return -ENOMEM;

	ap->ranges[0].base = dev_priv->gtt.mappable_base;
	ap->ranges[0].size = dev_priv->gtt.mappable_end;

	primary =
		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;

	ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary);

	kfree(ap);

	return ret;
}
#else
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	return 0;
}
#endif

#if !defined(CONFIG_VGA_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	return 0;
}
#elif !defined(CONFIG_DUMMY_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	return -ENODEV;
}
#else
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	DRM_INFO("Replacing VGA console driver\n");

	console_lock();
	if (con_is_bound(&vga_con))
		ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
	if (ret == 0) {
		ret = do_unregister_con_driver(&vga_con);

		/* Ignore "already unregistered". */
		if (ret == -ENODEV)
			ret = 0;
	}
	console_unlock();

	return ret;
}
#endif

static void i915_dump_device_info(struct drm_i915_private *dev_priv)
{
	const struct intel_device_info *info = &dev_priv->info;

#define PRINT_S(name) "%s"
#define SEP_EMPTY
#define PRINT_FLAG(name) info->name ? #name "," : ""
#define SEP_COMMA ,
	DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags="
			 DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
			 info->gen,
			 dev_priv->dev->pdev->device,
			 dev_priv->dev->pdev->revision,
			 DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
#undef PRINT_S
#undef SEP_EMPTY
#undef PRINT_FLAG
#undef SEP_COMMA
}

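/*
 * SSEU probing: the slice/subslice/EU (execution unit) topology is read out
 * of fuse registers, since parts ship with individual slices, subslices or
 * EUs fused off. Each platform packs these fuses differently, hence one
 * helper per generation below. As a worked example for CHV: with both
 * subslices present and one disable bit set in each subslice's EU disable
 * mask, eu_total comes out as 2 * (8 - 1) = 14.
 */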
static void cherryview_sseu_info_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_device_info *info;
	u32 fuse, eu_dis;

	info = (struct intel_device_info *)&dev_priv->info;
	fuse = I915_READ(CHV_FUSE_GT);

	info->slice_total = 1;

	if (!(fuse & CHV_FGT_DISABLE_SS0)) {
		info->subslice_per_slice++;
		eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
				 CHV_FGT_EU_DIS_SS0_R1_MASK);
		info->eu_total += 8 - hweight32(eu_dis);
	}

	if (!(fuse & CHV_FGT_DISABLE_SS1)) {
		info->subslice_per_slice++;
		eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK |
				 CHV_FGT_EU_DIS_SS1_R1_MASK);
		info->eu_total += 8 - hweight32(eu_dis);
	}

	info->subslice_total = info->subslice_per_slice;
	/*
	 * CHV expected to always have a uniform distribution of EU
	 * across subslices.
	 */
	info->eu_per_subslice = info->subslice_total ?
				info->eu_total / info->subslice_total :
				0;
	/*
	 * CHV supports subslice power gating on devices with more than
	 * one subslice, and supports EU power gating on devices with
	 * more than one EU pair per subslice.
	 */
	info->has_slice_pg = 0;
	info->has_subslice_pg = (info->subslice_total > 1);
	info->has_eu_pg = (info->eu_per_subslice > 2);
}

static void gen9_sseu_info_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_device_info *info;
	int s_max = 3, ss_max = 4, eu_max = 8;
	int s, ss;
	u32 fuse2, s_enable, ss_disable, eu_disable;
	u8 eu_mask = 0xff;

	info = (struct intel_device_info *)&dev_priv->info;
	fuse2 = I915_READ(GEN8_FUSE2);
	s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >>
		   GEN8_F2_S_ENA_SHIFT;
	ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >>
		     GEN9_F2_SS_DIS_SHIFT;

	info->slice_total = hweight32(s_enable);
	/*
	 * The subslice disable field is global, i.e. it applies
	 * to each of the enabled slices.
	 */
	info->subslice_per_slice = ss_max - hweight32(ss_disable);
	info->subslice_total = info->slice_total *
			       info->subslice_per_slice;

	/*
	 * Iterate through enabled slices and subslices to
	 * count the total enabled EU.
	 */
	for (s = 0; s < s_max; s++) {
		if (!(s_enable & (0x1 << s)))
			/* skip disabled slice */
			continue;

		eu_disable = I915_READ(GEN9_EU_DISABLE(s));
		for (ss = 0; ss < ss_max; ss++) {
			int eu_per_ss;

			if (ss_disable & (0x1 << ss))
				/* skip disabled subslice */
				continue;

			eu_per_ss = eu_max - hweight8((eu_disable >> (ss*8)) &
						      eu_mask);

			/*
			 * Record which subslices have 7 EUs. We can
			 * tune the hash used to spread work among
			 * subslices if they are unbalanced.
			 */
			if (eu_per_ss == 7)
				info->subslice_7eu[s] |= 1 << ss;

			info->eu_total += eu_per_ss;
		}
	}

	/*
	 * SKL is expected to always have a uniform distribution
	 * of EU across subslices with the exception that any one
	 * EU in any one subslice may be fused off for die
	 * recovery. BXT is expected to be perfectly uniform in EU
	 * distribution.
	 */
	info->eu_per_subslice = info->subslice_total ?
				DIV_ROUND_UP(info->eu_total,
					     info->subslice_total) : 0;
	/*
	 * SKL supports slice power gating on devices with more than
	 * one slice, and supports EU power gating on devices with
	 * more than one EU pair per subslice. BXT supports subslice
	 * power gating on devices with more than one subslice, and
	 * supports EU power gating on devices with more than one EU
	 * pair per subslice.
	 */
	info->has_slice_pg = ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
			      (info->slice_total > 1));
	info->has_subslice_pg = (IS_BROXTON(dev) && (info->subslice_total > 1));
	info->has_eu_pg = (info->eu_per_subslice > 2);
}

static void broadwell_sseu_info_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_device_info *info;
	const int s_max = 3, ss_max = 3, eu_max = 8;
	int s, ss;
	u32 fuse2, eu_disable[s_max], s_enable, ss_disable;

	fuse2 = I915_READ(GEN8_FUSE2);
	s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT;
	ss_disable = (fuse2 & GEN8_F2_SS_DIS_MASK) >> GEN8_F2_SS_DIS_SHIFT;

	eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK;
	eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) |
			((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) <<
			 (32 - GEN8_EU_DIS0_S1_SHIFT));
	eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) |
			((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) <<
			 (32 - GEN8_EU_DIS1_S2_SHIFT));

	info = (struct intel_device_info *)&dev_priv->info;
	info->slice_total = hweight32(s_enable);

	/*
	 * The subslice disable field is global, i.e. it applies
	 * to each of the enabled slices.
	 */
	info->subslice_per_slice = ss_max - hweight32(ss_disable);
	info->subslice_total = info->slice_total * info->subslice_per_slice;

	/*
	 * Iterate through enabled slices and subslices to
	 * count the total enabled EU.
	 */
	for (s = 0; s < s_max; s++) {
		if (!(s_enable & (0x1 << s)))
			/* skip disabled slice */
			continue;

		for (ss = 0; ss < ss_max; ss++) {
			u32 n_disabled;

			if (ss_disable & (0x1 << ss))
				/* skip disabled subslice */
				continue;

			n_disabled = hweight8(eu_disable[s] >> (ss * eu_max));

			/*
			 * Record which subslices have 7 EUs.
			 */
			if (eu_max - n_disabled == 7)
				info->subslice_7eu[s] |= 1 << ss;

			info->eu_total += eu_max - n_disabled;
		}
	}

	/*
	 * BDW is expected to always have a uniform distribution of EU across
	 * subslices with the exception that any one EU in any one subslice may
	 * be fused off for die recovery.
	 */
	info->eu_per_subslice = info->subslice_total ?
				DIV_ROUND_UP(info->eu_total, info->subslice_total) : 0;

	/*
	 * BDW supports slice power gating on devices with more than
	 * one slice.
	 */
	info->has_slice_pg = (info->slice_total > 1);
	info->has_subslice_pg = 0;
	info->has_eu_pg = 0;
}

/*
 * Determine various intel_device_info fields at runtime.
 *
 * Use it when either:
 * - it's judged too laborious to fill n static structures with the limit
 *   when a simple if statement does the job,
 * - run-time checks (e.g. reading fuse/strap registers) are needed.
 *
 * This function needs to be called:
 * - after the MMIO has been setup as we are reading registers,
 * - after the PCH has been detected,
 * - before the first usage of the fields it can tweak.
 */
static void intel_device_info_runtime_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_device_info *info;
	enum pipe pipe;

	info = (struct intel_device_info *)&dev_priv->info;

	/*
	 * Skylake and Broxton currently don't expose the topmost plane as its
	 * use is exclusive with the legacy cursor and we only want to expose
	 * one of those, not both. Until we can safely expose the topmost plane
	 * as a DRM_PLANE_TYPE_CURSOR with all the features exposed/supported,
	 * we don't expose the topmost plane at all to prevent ABI breakage
	 * down the line.
	 */
	if (IS_BROXTON(dev)) {
		info->num_sprites[PIPE_A] = 2;
		info->num_sprites[PIPE_B] = 2;
		info->num_sprites[PIPE_C] = 1;
	} else if (IS_VALLEYVIEW(dev))
		for_each_pipe(dev_priv, pipe)
			info->num_sprites[pipe] = 2;
	else
		for_each_pipe(dev_priv, pipe)
			info->num_sprites[pipe] = 1;

	if (i915.disable_display) {
		DRM_INFO("Display disabled (module parameter)\n");
		info->num_pipes = 0;
	} else if (info->num_pipes > 0 &&
		   (INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) &&
		   !IS_VALLEYVIEW(dev)) {
		u32 fuse_strap = I915_READ(FUSE_STRAP);
		u32 sfuse_strap = I915_READ(SFUSE_STRAP);

		/*
		 * SFUSE_STRAP is supposed to have a bit signalling the display
		 * is fused off. Unfortunately it seems that, at least in
		 * certain cases, fused off display means that PCH display
		 * reads don't land anywhere. In that case, we read 0s.
		 *
		 * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
		 * should be set when taking over after the firmware.
		 */
		if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
		    sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
		    (dev_priv->pch_type == PCH_CPT &&
		     !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
			DRM_INFO("Display fused off, disabling\n");
			info->num_pipes = 0;
		}
	}

	/* Initialize slice/subslice/EU info */
	if (IS_CHERRYVIEW(dev))
		cherryview_sseu_info_init(dev);
	else if (IS_BROADWELL(dev))
		broadwell_sseu_info_init(dev);
	else if (INTEL_INFO(dev)->gen >= 9)
		gen9_sseu_info_init(dev);

	DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
	DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
	DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
	DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total);
	DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice);
	DRM_DEBUG_DRIVER("has slice power gating: %s\n",
			 info->has_slice_pg ? "y" : "n");
	DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
			 info->has_subslice_pg ? "y" : "n");
	DRM_DEBUG_DRIVER("has EU power gating: %s\n",
			 info->has_eu_pg ? "y" : "n");
}

static void intel_init_dpio(struct drm_i915_private *dev_priv)
{
	if (!IS_VALLEYVIEW(dev_priv))
		return;

	/*
	 * IOSF_PORT_DPIO is used for VLV x2 PHY (DP/HDMI B and C),
	 * CHV x1 PHY (DP/HDMI D)
	 * IOSF_PORT_DPIO_2 is used for CHV x2 PHY (DP/HDMI B and C)
	 */
	if (IS_CHERRYVIEW(dev_priv)) {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO_2;
		DPIO_PHY_IOSF_PORT(DPIO_PHY1) = IOSF_PORT_DPIO;
	} else {
		DPIO_PHY_IOSF_PORT(DPIO_PHY0) = IOSF_PORT_DPIO;
	}
}

/**
 * i915_driver_load - setup chip and create an initial config
 * @dev: DRM device
 * @flags: startup flags
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_i915_private *dev_priv;
	struct intel_device_info *info, *device_info;
	int ret = 0, mmio_bar, mmio_size;
	uint32_t aperture_size;

	info = (struct intel_device_info *) flags;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (dev_priv == NULL)
		return -ENOMEM;

	dev->dev_private = dev_priv;
	dev_priv->dev = dev;

	/* Setup the write-once "constant" device info */
	device_info = (struct intel_device_info *)&dev_priv->info;
	memcpy(device_info, info, sizeof(dev_priv->info));
	device_info->device_id = dev->pdev->device;

	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	mutex_init(&dev_priv->backlight_lock);
	spin_lock_init(&dev_priv->uncore.lock);
	spin_lock_init(&dev_priv->mm.object_stat_lock);
	spin_lock_init(&dev_priv->mmio_flip_lock);
	mutex_init(&dev_priv->sb_lock);
	mutex_init(&dev_priv->modeset_restore_lock);
	mutex_init(&dev_priv->csr_lock);
	mutex_init(&dev_priv->av_mutex);

	intel_pm_setup(dev);

	intel_display_crc_init(dev);

	i915_dump_device_info(dev_priv);

	/* Not all pre-production machines fall into this category, only the
	 * very first ones. Almost everything should work, except for maybe
	 * suspend/resume. And we don't implement workarounds that affect only
	 * pre-production machines. */
	if (IS_HSW_EARLY_SDV(dev))
		DRM_INFO("This is an early pre-production Haswell machine. "
			 "It may not be fully functional.\n");

	if (i915_get_bridge_dev(dev)) {
		ret = -EIO;
		goto free_priv;
	}

	mmio_bar = IS_GEN2(dev) ? 1 : 0;
	/* Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap from
	 * clobbering the GTT, which we want to map with ioremap_wc instead.
	 * Fortunately, the register BAR remains the same size for all the
	 * earlier generations up to Ironlake.
	 */
	if (info->gen < 5)
		mmio_size = 512*1024;
	else
		mmio_size = 2*1024*1024;

	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
	if (!dev_priv->regs) {
		DRM_ERROR("failed to map registers\n");
		ret = -EIO;
		goto put_bridge;
	}

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(dev);

	intel_uncore_init(dev);

	/* Load CSR Firmware for SKL */
	intel_csr_ucode_init(dev);

	ret = i915_gem_gtt_init(dev);
	if (ret)
		goto out_freecsr;

	/* WARNING: Apparently we must kick fbdev drivers before vgacon,
	 * otherwise the vga fbdev driver falls over. */
	ret = i915_kick_out_firmware_fb(dev_priv);
	if (ret) {
		DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
		goto out_gtt;
	}

	ret = i915_kick_out_vgacon(dev_priv);
	if (ret) {
		DRM_ERROR("failed to remove conflicting VGA console\n");
		goto out_gtt;
	}

	pci_set_master(dev->pdev);

	/* overlay on gen2 is broken and can't address above 1G */
	if (IS_GEN2(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));

	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
	if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));

	aperture_size = dev_priv->gtt.mappable_end;

	dev_priv->gtt.mappable =
		io_mapping_create_wc(dev_priv->gtt.mappable_base,
				     aperture_size);
	if (dev_priv->gtt.mappable == NULL) {
		ret = -EIO;
		goto out_gtt;
	}

	dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
					      aperture_size);

	/* The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_gem_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time. Use an ordered one.
	 */
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL) {
		DRM_ERROR("Failed to create our workqueue.\n");
		ret = -ENOMEM;
		goto out_mtrrfree;
	}

	dev_priv->hotplug.dp_wq = alloc_ordered_workqueue("i915-dp", 0);
	if (dev_priv->hotplug.dp_wq == NULL) {
		DRM_ERROR("Failed to create our dp workqueue.\n");
		ret = -ENOMEM;
		goto out_freewq;
	}

	dev_priv->gpu_error.hangcheck_wq =
		alloc_ordered_workqueue("i915-hangcheck", 0);
	if (dev_priv->gpu_error.hangcheck_wq == NULL) {
		DRM_ERROR("Failed to create our hangcheck workqueue.\n");
		ret = -ENOMEM;
		goto out_freedpwq;
	}

	intel_irq_init(dev_priv);
	intel_uncore_sanitize(dev);

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev);
	intel_setup_gmbus(dev);
	intel_opregion_setup(dev);

	i915_gem_load(dev);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs. It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, but we use them anyways to avoid
	 * stuck interrupts on some machines.
	 */
	if (!IS_I945G(dev) && !IS_I945GM(dev))
		pci_enable_msi(dev->pdev);

	intel_device_info_runtime_init(dev);

	intel_init_dpio(dev_priv);

	if (INTEL_INFO(dev)->num_pipes) {
		ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
		if (ret)
			goto out_gem_unload;
	}

	intel_power_domains_init(dev_priv);

	ret = i915_load_modeset_init(dev);
	if (ret < 0) {
		DRM_ERROR("failed to init modeset\n");
		goto out_power_well;
	}

	/*
	 * Notify a valid surface after modesetting,
	 * when running inside a VM.
	 */
	if (intel_vgpu_active(dev))
		I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);

	i915_setup_sysfs(dev);

	if (INTEL_INFO(dev)->num_pipes) {
		/* Must be done after probing outputs */
		intel_opregion_init(dev);
		acpi_video_register();
	}

	if (IS_GEN5(dev))
		intel_gpu_ips_init(dev_priv);

	intel_runtime_pm_enable(dev_priv);

	i915_audio_component_init(dev_priv);

	return 0;

out_power_well:
	intel_power_domains_fini(dev_priv);
	drm_vblank_cleanup(dev);
out_gem_unload:
	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
	unregister_shrinker(&dev_priv->mm.shrinker);

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	intel_teardown_gmbus(dev);
	intel_teardown_mchbar(dev);
	pm_qos_remove_request(&dev_priv->pm_qos);
	destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
out_freedpwq:
	destroy_workqueue(dev_priv->hotplug.dp_wq);
out_freewq:
	destroy_workqueue(dev_priv->wq);
out_mtrrfree:
	arch_phys_wc_del(dev_priv->gtt.mtrr);
	io_mapping_free(dev_priv->gtt.mappable);
out_gtt:
	i915_global_gtt_cleanup(dev);
out_freecsr:
	intel_csr_ucode_fini(dev);
	intel_uncore_fini(dev);
	pci_iounmap(dev->pdev, dev_priv->regs);
put_bridge:
	pci_dev_put(dev_priv->bridge_dev);
free_priv:
	kmem_cache_destroy(dev_priv->requests);
	kmem_cache_destroy(dev_priv->vmas);
	kmem_cache_destroy(dev_priv->objects);
	kfree(dev_priv);
	return ret;
}

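/*
 * i915_driver_unload() tears everything down in roughly the reverse order
 * of i915_driver_load(): idle the GPU first, then unwind modeset, GEM,
 * firmware, MMIO mappings and finally the private structure itself.
 */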
int i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	i915_audio_component_cleanup(dev_priv);

	ret = i915_gem_suspend(dev);
	if (ret) {
		DRM_ERROR("failed to idle hardware: %d\n", ret);
		return ret;
	}

	intel_power_domains_fini(dev_priv);

	intel_gpu_ips_teardown();

	i915_teardown_sysfs(dev);

	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
	unregister_shrinker(&dev_priv->mm.shrinker);

	io_mapping_free(dev_priv->gtt.mappable);
	arch_phys_wc_del(dev_priv->gtt.mtrr);

	acpi_video_unregister();

	intel_fbdev_fini(dev);

	drm_vblank_cleanup(dev);

	intel_modeset_cleanup(dev);

	/*
	 * free the memory space allocated for the child device
	 * config parsed from VBT
	 */
	if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
		kfree(dev_priv->vbt.child_dev);
		dev_priv->vbt.child_dev = NULL;
		dev_priv->vbt.child_dev_num = 0;
	}
	kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
	dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
	kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
	dev_priv->vbt.lfp_lvds_vbt_mode = NULL;

	vga_switcheroo_unregister_client(dev->pdev);
	vga_client_register(dev->pdev, NULL, NULL, NULL);

	/* Free error state after interrupts are fully disabled. */
	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
	i915_destroy_error_state(dev);

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	intel_opregion_fini(dev);

	/* Flush any outstanding unpin_work. */
	flush_workqueue(dev_priv->wq);

	mutex_lock(&dev->struct_mutex);
	intel_guc_ucode_fini(dev);
	i915_gem_cleanup_ringbuffer(dev);
	i915_gem_context_fini(dev);
	mutex_unlock(&dev->struct_mutex);
	intel_fbc_cleanup_cfb(dev_priv);
	i915_gem_cleanup_stolen(dev);

	intel_csr_ucode_fini(dev);

	intel_teardown_gmbus(dev);
	intel_teardown_mchbar(dev);

	destroy_workqueue(dev_priv->hotplug.dp_wq);
	destroy_workqueue(dev_priv->wq);
	destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
	pm_qos_remove_request(&dev_priv->pm_qos);

	i915_global_gtt_cleanup(dev);

	intel_uncore_fini(dev);
	if (dev_priv->regs != NULL)
		pci_iounmap(dev->pdev, dev_priv->regs);

	kmem_cache_destroy(dev_priv->requests);
	kmem_cache_destroy(dev_priv->vmas);
	kmem_cache_destroy(dev_priv->objects);
	pci_dev_put(dev_priv->bridge_dev);
	kfree(dev_priv);

	return 0;
}

int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	int ret;

	ret = i915_gem_open(dev, file);
	if (ret)
		return ret;

	return 0;
}

/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited. In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
void i915_driver_lastclose(struct drm_device *dev)
{
	intel_fbdev_restore_mode(dev);
	vga_switcheroo_process_delayed_switch();
}

void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
{
	mutex_lock(&dev->struct_mutex);
	i915_gem_context_close(dev, file);
	i915_gem_release(dev, file);
	mutex_unlock(&dev->struct_mutex);

	intel_modeset_preclose(dev, file);
}

void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	if (file_priv && file_priv->bsd_ring)
		file_priv->bsd_ring = NULL;
	kfree(file_priv);
}

static int
i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	return -ENODEV;
}

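/*
 * Master ioctl dispatch table. Each DRM_IOCTL_DEF_DRV() entry binds an
 * I915_* ioctl number to its handler plus access-control flags: DRM_AUTH
 * (authenticated clients only), DRM_MASTER/DRM_ROOT_ONLY (privileged),
 * DRM_RENDER_ALLOW (usable on render nodes) and DRM_UNLOCKED (called
 * without the global DRM mutex). Legacy UMS entry points are wired to
 * drm_noop or the pin-rejection stub above.
 */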
const struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
};

int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);