/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/async.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_legacy.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include <linux/pci.h>
#include <linux/console.h>
#include <linux/vt.h>
#include <linux/vgaarb.h>
#include <linux/acpi.h>
#include <linux/pnp.h>
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <acpi/video.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/oom.h>
static int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	drm_i915_getparam_t *param = data;
	int value;

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
	case I915_PARAM_ALLOW_BATCHBUFFER:
	case I915_PARAM_LAST_DISPATCH:
		/* Reject all old ums/dri params. */
		return -ENODEV;
	case I915_PARAM_CHIPSET_ID:
		value = dev->pdev->device;
		break;
	case I915_PARAM_HAS_GEM:
		value = 1;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
		break;
	case I915_PARAM_HAS_OVERLAY:
		value = dev_priv->overlay ? 1 : 0;
		break;
	case I915_PARAM_HAS_PAGEFLIPPING:
		value = 1;
		break;
	case I915_PARAM_HAS_EXECBUF2:
		/* depends on GEM */
		value = 1;
		break;
	case I915_PARAM_HAS_BSD:
		value = intel_ring_initialized(&dev_priv->ring[VCS]);
		break;
	case I915_PARAM_HAS_BLT:
		value = intel_ring_initialized(&dev_priv->ring[BCS]);
		break;
	case I915_PARAM_HAS_VEBOX:
		value = intel_ring_initialized(&dev_priv->ring[VECS]);
		break;
	case I915_PARAM_HAS_BSD2:
		value = intel_ring_initialized(&dev_priv->ring[VCS2]);
		break;
	case I915_PARAM_HAS_RELAXED_FENCING:
		value = 1;
		break;
	case I915_PARAM_HAS_COHERENT_RINGS:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_CONSTANTS:
		value = INTEL_INFO(dev)->gen >= 4;
		break;
	case I915_PARAM_HAS_RELAXED_DELTA:
		value = 1;
		break;
	case I915_PARAM_HAS_GEN7_SOL_RESET:
		value = 1;
		break;
	case I915_PARAM_HAS_LLC:
		value = HAS_LLC(dev);
		break;
	case I915_PARAM_HAS_WT:
		value = HAS_WT(dev);
		break;
	case I915_PARAM_HAS_ALIASING_PPGTT:
		value = USES_PPGTT(dev);
		break;
	case I915_PARAM_HAS_WAIT_TIMEOUT:
		value = 1;
		break;
	case I915_PARAM_HAS_SEMAPHORES:
		value = i915_semaphore_is_enabled(dev);
		break;
	case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
		value = 1;
		break;
	case I915_PARAM_HAS_SECURE_BATCHES:
		value = capable(CAP_SYS_ADMIN);
		break;
	case I915_PARAM_HAS_PINNED_BATCHES:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_NO_RELOC:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_HANDLE_LUT:
		value = 1;
		break;
	case I915_PARAM_CMD_PARSER_VERSION:
		value = i915_cmd_parser_get_version();
		break;
	case I915_PARAM_HAS_COHERENT_PHYS_GTT:
		value = 1;
		break;
	case I915_PARAM_MMAP_VERSION:
		value = 1;
		break;
	default:
		DRM_DEBUG("Unknown parameter %d\n", param->param);
		return -EINVAL;
	}

	if (copy_to_user(param->value, &value, sizeof(int))) {
		DRM_ERROR("copy_to_user failed\n");
		return -EFAULT;
	}

	return 0;
}
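
/*
 * Userspace reaches the handler above through the GETPARAM ioctl. A
 * minimal sketch of a caller (hypothetical libdrm user, fd already open,
 * error handling elided):
 *
 *	int devid = 0;
 *	drm_i915_getparam_t gp = {
 *		.param = I915_PARAM_CHIPSET_ID,
 *		.value = &devid,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
 */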
static int i915_setparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	drm_i915_setparam_t *param = data;

	switch (param->param) {
	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
	case I915_SETPARAM_ALLOW_BATCHBUFFER:
		/* Reject all old ums/dri params. */
		return -ENODEV;

	case I915_SETPARAM_NUM_USED_FENCES:
		if (param->value > dev_priv->num_fence_regs ||
		    param->value < 0)
			return -EINVAL;
		/* Userspace can use first N regs */
		dev_priv->fence_reg_start = param->value;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown parameter %d\n",
				 param->param);
		return -EINVAL;
	}

	return 0;
}
static int i915_get_bridge_dev(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
	if (!dev_priv->bridge_dev) {
		DRM_ERROR("bridge device not found\n");
		return -1;
	}
	return 0;
}
#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)

#define DEVEN_REG 0x54
#define DEVEN_MCHBAR_EN (1 << 28)
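
/*
 * Note: on gen4+ the MCHBAR base in the bridge is a 64-bit value, which
 * is why the code below reads and writes a second dword at reg + 4;
 * older chipsets carry only a 32-bit base at MCHBAR_I915. On 915G/GM
 * the enable bit lives in DEVEN_REG rather than bit 0 of the BAR itself.
 */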
/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret;

	if (INTEL_INFO(dev)->gen >= 4)
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	dev_priv->mch_res.name = "i915 MCHBAR";
	dev_priv->mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
				     &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
		dev_priv->mch_res.start = 0;
		return ret;
	}

	if (INTEL_INFO(dev)->gen >= 4)
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
	return 0;
}
/*
 * Setup MCHBAR if possible; set mchbar_need_disable if we had to enable
 * it ourselves, so that teardown can disable it again.
 */
static void
intel_setup_mchbar(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	if (IS_VALLEYVIEW(dev))
		return;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}
static void
intel_teardown_mchbar(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev) || IS_I915GM(dev)) {
			pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
			temp &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
		} else {
			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
			temp &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}
/* true = enable decode, false = disable decode */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
	struct drm_device *dev = cookie;

	intel_modeset_vga_set_state(dev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
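
/*
 * The return value above tells the VGA arbiter which resources this
 * client decodes: with decode enabled we also claim the legacy VGA I/O
 * and memory ranges, with it disabled only the normal (PCI BAR) ranges.
 */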
static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("switched on\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		/* i915 resume handler doesn't set to D0 */
		pci_set_power_state(dev->pdev, PCI_D0);
		i915_resume_legacy(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		pr_err("switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		i915_suspend_legacy(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}
static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	/*
	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
	 * locking inversion with the driver load path. And the access here is
	 * completely racy anyway. So don't bother with locking for now.
	 */
	return dev->open_count == 0;
}
static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
	.set_gpu_state = i915_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = i915_switcheroo_can_switch,
};
static int i915_load_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_parse_bios(dev);
	if (ret)
		DRM_INFO("failed to find VBIOS tables\n");

	/* If we have > 1 VGA cards, then we need to arbitrate access
	 * to the common VGA resources.
	 *
	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
	 * then we do not take part in VGA arbitration and the
	 * vga_client_register() fails with -ENODEV.
	 */
	ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
	if (ret && ret != -ENODEV)
		goto out;

	intel_register_dsm_handler();

	ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false);
	if (ret)
		goto cleanup_vga_client;

	/* Initialise stolen first so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	ret = i915_gem_init_stolen(dev);
	if (ret)
		goto cleanup_vga_switcheroo;

	intel_power_domains_init_hw(dev_priv);

	ret = intel_irq_install(dev_priv);
	if (ret)
		goto cleanup_gem_stolen;

	/* Important: The output setup functions called by modeset_init need
	 * working irqs for e.g. gmbus and dp aux transfers. */
	intel_modeset_init(dev);

	ret = i915_gem_init(dev);
	if (ret)
		goto cleanup_irq;

	intel_modeset_gem_init(dev);

	/* Always safe in the mode setting case. */
	/* FIXME: do pre/post-mode set stuff in core KMS code */
	dev->vblank_disable_allowed = true;
	if (INTEL_INFO(dev)->num_pipes == 0)
		return 0;

	ret = intel_fbdev_init(dev);
	if (ret)
		goto cleanup_gem;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(dev_priv);

	/*
	 * Some ports require correctly set-up hpd registers for detection to
	 * work properly (leading to ghost connected connector status), e.g. VGA
	 * on gm45. Hence we can only set up the initial fbdev config after hpd
	 * irqs are fully enabled. Now we should scan for the initial config
	 * only once hotplug handling is enabled, but due to screwed-up locking
	 * around kms/fbdev init we can't protect the fbdev initial config
	 * scanning against hotplug events. Hence do this first and ignore the
	 * tiny window where we will lose hotplug notifications.
	 */
	async_schedule(intel_fbdev_initial_config, dev_priv);

	drm_kms_helper_poll_init(dev);

	return 0;

cleanup_gem:
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	i915_gem_context_fini(dev);
	mutex_unlock(&dev->struct_mutex);
cleanup_irq:
	drm_irq_uninstall(dev);
cleanup_gem_stolen:
	i915_gem_cleanup_stolen(dev);
cleanup_vga_switcheroo:
	vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
	vga_client_register(dev->pdev, NULL, NULL, NULL);
out:
	return ret;
}
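
/*
 * The error labels above unwind strictly in reverse order of the
 * corresponding init steps, so a failure at any point releases exactly
 * what has been set up so far and nothing more.
 */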
#if IS_ENABLED(CONFIG_FB)
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	struct apertures_struct *ap;
	struct pci_dev *pdev = dev_priv->dev->pdev;
	bool primary;
	int ret;

	ap = alloc_apertures(1);
	if (!ap)
		return -ENOMEM;

	ap->ranges[0].base = dev_priv->gtt.mappable_base;
	ap->ranges[0].size = dev_priv->gtt.mappable_end;

	primary =
		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;

	ret = remove_conflicting_framebuffers(ap, "inteldrmfb", primary);

	kfree(ap);

	return ret;
}
#else
static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	return 0;
}
#endif
#if !defined(CONFIG_VGA_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	return 0;
}
#elif !defined(CONFIG_DUMMY_CONSOLE)
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	return -ENODEV;
}
#else
static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	DRM_INFO("Replacing VGA console driver\n");

	console_lock();
	if (con_is_bound(&vga_con))
		ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
	if (ret == 0) {
		ret = do_unregister_con_driver(&vga_con);

		/* Ignore "already unregistered". */
		if (ret == -ENODEV)
			ret = 0;
	}
	console_unlock();

	return ret;
}
#endif
static void i915_dump_device_info(struct drm_i915_private *dev_priv)
{
	const struct intel_device_info *info = &dev_priv->info;

#define PRINT_S(name) "%s"
#define SEP_EMPTY
#define PRINT_FLAG(name) info->name ? #name "," : ""
#define SEP_COMMA ,
	DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x rev=0x%02x flags="
			 DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
			 info->gen,
			 dev_priv->dev->pdev->device,
			 dev_priv->dev->pdev->revision,
			 DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
#undef PRINT_S
#undef SEP_EMPTY
#undef PRINT_FLAG
#undef SEP_COMMA
}
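
/*
 * The macro trick above: DEV_INFO_FOR_EACH_FLAG expands its first
 * argument once per device flag. Expanding it with PRINT_S builds a
 * "%s%s..." format string at compile time, while the second expansion
 * with PRINT_FLAG supplies one matching string argument per flag
 * ("flagname," if set, "" otherwise), so only the set flags get printed.
 */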
/*
 * Determine various intel_device_info fields at runtime.
 *
 * Use it when either:
 *   - it's judged too laborious to fill n static structures with the limit
 *     when a simple if statement does the job,
 *   - run-time checks (eg read fuse/strap registers) are needed.
 *
 * This function needs to be called:
 *   - after the MMIO has been setup as we are reading registers,
 *   - after the PCH has been detected,
 *   - before the first usage of the fields it can tweak.
 */
static void intel_device_info_runtime_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_device_info *info;
	enum pipe pipe;

	info = (struct intel_device_info *)&dev_priv->info;

	if (IS_VALLEYVIEW(dev) || INTEL_INFO(dev)->gen == 9)
		for_each_pipe(dev_priv, pipe)
			info->num_sprites[pipe] = 2;
	else
		for_each_pipe(dev_priv, pipe)
			info->num_sprites[pipe] = 1;

	if (i915.disable_display) {
		DRM_INFO("Display disabled (module parameter)\n");
		info->num_pipes = 0;
	} else if (info->num_pipes > 0 &&
		   (INTEL_INFO(dev)->gen == 7 || INTEL_INFO(dev)->gen == 8) &&
		   !IS_VALLEYVIEW(dev)) {
		u32 fuse_strap = I915_READ(FUSE_STRAP);
		u32 sfuse_strap = I915_READ(SFUSE_STRAP);

		/*
		 * SFUSE_STRAP is supposed to have a bit signalling the display
		 * is fused off. Unfortunately it seems that, at least in
		 * certain cases, fused off display means that PCH display
		 * reads don't land anywhere. In that case, we read 0s.
		 *
		 * On CPT/PPT, we can detect this case as SFUSE_STRAP_FUSE_LOCK
		 * should be set when taking over after the firmware.
		 */
		if (fuse_strap & ILK_INTERNAL_DISPLAY_DISABLE ||
		    sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
		    (dev_priv->pch_type == PCH_CPT &&
		     !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
			DRM_INFO("Display fused off, disabling\n");
			info->num_pipes = 0;
		}
	}

	/* Initialize slice/subslice/EU info */
	if (IS_CHERRYVIEW(dev)) {
		u32 fuse, mask_eu;

		fuse = I915_READ(CHV_FUSE_GT);
		mask_eu = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK |
				  CHV_FGT_EU_DIS_SS0_R1_MASK |
				  CHV_FGT_EU_DIS_SS1_R0_MASK |
				  CHV_FGT_EU_DIS_SS1_R1_MASK);
		info->eu_total = 16 - hweight32(mask_eu);
	} else if (IS_SKYLAKE(dev)) {
		const int s_max = 3, ss_max = 4, eu_max = 8;
		int s, ss;
		u32 fuse2, eu_disable[s_max], s_enable, ss_disable;

		fuse2 = I915_READ(GEN8_FUSE2);
		s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >>
			   GEN8_F2_S_ENA_SHIFT;
		ss_disable = (fuse2 & GEN9_F2_SS_DIS_MASK) >>
			     GEN9_F2_SS_DIS_SHIFT;

		eu_disable[0] = I915_READ(GEN8_EU_DISABLE0);
		eu_disable[1] = I915_READ(GEN8_EU_DISABLE1);
		eu_disable[2] = I915_READ(GEN8_EU_DISABLE2);

		info->slice_total = hweight32(s_enable);
		/*
		 * The subslice disable field is global, i.e. it applies
		 * to each of the enabled slices.
		 */
		info->subslice_per_slice = ss_max - hweight32(ss_disable);
		info->subslice_total = info->slice_total *
				       info->subslice_per_slice;

		/*
		 * Iterate through enabled slices and subslices to
		 * count the total enabled EU.
		 */
		for (s = 0; s < s_max; s++) {
			if (!(s_enable & (0x1 << s)))
				/* skip disabled slice */
				continue;

			for (ss = 0; ss < ss_max; ss++) {
				u32 n_disabled;

				if (ss_disable & (0x1 << ss))
					/* skip disabled subslice */
					continue;

				n_disabled = hweight8(eu_disable[s] >>
						      (ss * eu_max));

				/*
				 * Record which subslice(s) has(have) 7 EUs. We
				 * can tune the hash used to spread work among
				 * subslices if they are unbalanced.
				 */
				if (eu_max - n_disabled == 7)
					info->subslice_7eu[s] |= 1 << ss;

				info->eu_total += eu_max - n_disabled;
			}
		}

		/*
		 * SKL is expected to always have a uniform distribution
		 * of EU across subslices with the exception that any one
		 * EU in any one subslice may be fused off for die
		 * recovery.
		 */
		info->eu_per_subslice = info->subslice_total ?
					DIV_ROUND_UP(info->eu_total,
						     info->subslice_total) : 0;
		/*
		 * SKL supports slice power gating on devices with more than
		 * one slice, and supports EU power gating on devices with
		 * more than one EU pair per subslice.
		 */
		info->has_slice_pg = (info->slice_total > 1) ? 1 : 0;
		info->has_subslice_pg = 0;
		info->has_eu_pg = (info->eu_per_subslice > 2) ? 1 : 0;
	}
	DRM_DEBUG_DRIVER("slice total: %u\n", info->slice_total);
	DRM_DEBUG_DRIVER("subslice total: %u\n", info->subslice_total);
	DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
	DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total);
	DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice);
	DRM_DEBUG_DRIVER("has slice power gating: %s\n",
			 info->has_slice_pg ? "y" : "n");
	DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
			 info->has_subslice_pg ? "y" : "n");
	DRM_DEBUG_DRIVER("has EU power gating: %s\n",
			 info->has_eu_pg ? "y" : "n");
}
/**
 * i915_driver_load - setup chip and create an initial config
 * @dev: DRM device
 * @flags: startup flags
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_i915_private *dev_priv;
	struct intel_device_info *info, *device_info;
	int ret = 0, mmio_bar, mmio_size;
	uint32_t aperture_size;

	info = (struct intel_device_info *) flags;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (dev_priv == NULL)
		return -ENOMEM;

	dev->dev_private = dev_priv;
	dev_priv->dev = dev;

	/* Setup the write-once "constant" device info */
	device_info = (struct intel_device_info *)&dev_priv->info;
	memcpy(device_info, info, sizeof(dev_priv->info));
	device_info->device_id = dev->pdev->device;

	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	mutex_init(&dev_priv->backlight_lock);
	spin_lock_init(&dev_priv->uncore.lock);
	spin_lock_init(&dev_priv->mm.object_stat_lock);
	spin_lock_init(&dev_priv->mmio_flip_lock);
	mutex_init(&dev_priv->dpio_lock);
	mutex_init(&dev_priv->modeset_restore_lock);

	intel_pm_setup(dev);

	intel_display_crc_init(dev);

	i915_dump_device_info(dev_priv);

	/* Not all pre-production machines fall into this category, only the
	 * very first ones. Almost everything should work, except for maybe
	 * suspend/resume. And we don't implement workarounds that affect only
	 * pre-production machines. */
	if (IS_HSW_EARLY_SDV(dev))
		DRM_INFO("This is an early pre-production Haswell machine. "
			 "It may not be fully functional.\n");

	if (i915_get_bridge_dev(dev)) {
		ret = -EIO;
		goto free_priv;
	}

	mmio_bar = IS_GEN2(dev) ? 1 : 0;
	/* Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap from
	 * clobbering the GTT which we want ioremap_wc instead. Fortunately,
	 * the register BAR remains the same size for all the earlier
	 * generations up to Ironlake.
	 */
	if (info->gen < 5)
		mmio_size = 512*1024;
	else
		mmio_size = 2*1024*1024;

	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
	if (!dev_priv->regs) {
		DRM_ERROR("failed to map registers\n");
		ret = -EIO;
		goto put_bridge;
	}

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(dev);

	intel_uncore_init(dev);

	ret = i915_gem_gtt_init(dev);
	if (ret)
		goto out_regs;

	/* WARNING: Apparently we must kick fbdev drivers before vgacon,
	 * otherwise the vga fbdev driver falls over. */
	ret = i915_kick_out_firmware_fb(dev_priv);
	if (ret) {
		DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
		goto out_gtt;
	}

	ret = i915_kick_out_vgacon(dev_priv);
	if (ret) {
		DRM_ERROR("failed to remove conflicting VGA console\n");
		goto out_gtt;
	}

	pci_set_master(dev->pdev);

	/* overlay on gen2 is broken and can't address above 1G */
	if (IS_GEN2(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));

	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
	if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));

	aperture_size = dev_priv->gtt.mappable_end;

	dev_priv->gtt.mappable =
		io_mapping_create_wc(dev_priv->gtt.mappable_base,
				     aperture_size);
	if (dev_priv->gtt.mappable == NULL) {
		ret = -EIO;
		goto out_gtt;
	}

	dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
					      aperture_size);

	/* The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_gem_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time.  Use an ordered one.
	 */
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL) {
		DRM_ERROR("Failed to create our workqueue.\n");
		ret = -ENOMEM;
		goto out_mtrrfree;
	}

	dev_priv->dp_wq = alloc_ordered_workqueue("i915-dp", 0);
	if (dev_priv->dp_wq == NULL) {
		DRM_ERROR("Failed to create our dp workqueue.\n");
		ret = -ENOMEM;
		goto out_freewq;
	}

	dev_priv->gpu_error.hangcheck_wq =
		alloc_ordered_workqueue("i915-hangcheck", 0);
	if (dev_priv->gpu_error.hangcheck_wq == NULL) {
		DRM_ERROR("Failed to create our hangcheck workqueue.\n");
		ret = -ENOMEM;
		goto out_freedpwq;
	}

	intel_irq_init(dev_priv);
	intel_uncore_sanitize(dev);

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev);
	intel_setup_gmbus(dev);
	intel_opregion_setup(dev);

	intel_setup_bios(dev);

	i915_gem_load(dev);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs.  It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, but we use them anyways to avoid
	 * stuck interrupts on some machines.
	 */
	if (!IS_I945G(dev) && !IS_I945GM(dev))
		pci_enable_msi(dev->pdev);

	intel_device_info_runtime_init(dev);

	if (INTEL_INFO(dev)->num_pipes) {
		ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
		if (ret)
			goto out_gem_unload;
	}

	intel_power_domains_init(dev_priv);

	ret = i915_load_modeset_init(dev);
	if (ret < 0) {
		DRM_ERROR("failed to init modeset\n");
		goto out_power_well;
	}

	/*
	 * Notify a valid surface after modesetting,
	 * when running inside a VM.
	 */
	if (intel_vgpu_active(dev))
		I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);

	i915_setup_sysfs(dev);

	if (INTEL_INFO(dev)->num_pipes) {
		/* Must be done after probing outputs */
		intel_opregion_init(dev);
		acpi_video_register();
	}

	if (IS_GEN5(dev))
		intel_gpu_ips_init(dev_priv);

	intel_runtime_pm_enable(dev_priv);

	i915_audio_component_init(dev_priv);

	return 0;

out_power_well:
	intel_power_domains_fini(dev_priv);
	drm_vblank_cleanup(dev);
out_gem_unload:
	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
	unregister_shrinker(&dev_priv->mm.shrinker);

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	intel_teardown_gmbus(dev);
	intel_teardown_mchbar(dev);
	pm_qos_remove_request(&dev_priv->pm_qos);
	destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
out_freedpwq:
	destroy_workqueue(dev_priv->dp_wq);
out_freewq:
	destroy_workqueue(dev_priv->wq);
out_mtrrfree:
	arch_phys_wc_del(dev_priv->gtt.mtrr);
	io_mapping_free(dev_priv->gtt.mappable);
out_gtt:
	i915_global_gtt_cleanup(dev);
out_regs:
	intel_uncore_fini(dev);
	pci_iounmap(dev->pdev, dev_priv->regs);
put_bridge:
	pci_dev_put(dev_priv->bridge_dev);
free_priv:
	if (dev_priv->slab)
		kmem_cache_destroy(dev_priv->slab);
	kfree(dev_priv);
	return ret;
}
int i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	i915_audio_component_cleanup(dev_priv);

	ret = i915_gem_suspend(dev);
	if (ret) {
		DRM_ERROR("failed to idle hardware: %d\n", ret);
		return ret;
	}

	intel_power_domains_fini(dev_priv);

	intel_gpu_ips_teardown();

	i915_teardown_sysfs(dev);

	WARN_ON(unregister_oom_notifier(&dev_priv->mm.oom_notifier));
	unregister_shrinker(&dev_priv->mm.shrinker);

	io_mapping_free(dev_priv->gtt.mappable);
	arch_phys_wc_del(dev_priv->gtt.mtrr);

	acpi_video_unregister();

	intel_fbdev_fini(dev);

	drm_vblank_cleanup(dev);

	intel_modeset_cleanup(dev);

	/*
	 * free the memory space allocated for the child device
	 * config parsed from VBT
	 */
	if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
		kfree(dev_priv->vbt.child_dev);
		dev_priv->vbt.child_dev = NULL;
		dev_priv->vbt.child_dev_num = 0;
	}

	vga_switcheroo_unregister_client(dev->pdev);
	vga_client_register(dev->pdev, NULL, NULL, NULL);

	/* Free error state after interrupts are fully disabled. */
	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
	i915_destroy_error_state(dev);

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	intel_opregion_fini(dev);

	/* Flush any outstanding unpin_work. */
	flush_workqueue(dev_priv->wq);

	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	i915_gem_batch_pool_fini(&dev_priv->mm.batch_pool);
	i915_gem_context_fini(dev);
	mutex_unlock(&dev->struct_mutex);
	i915_gem_cleanup_stolen(dev);

	intel_teardown_gmbus(dev);
	intel_teardown_mchbar(dev);

	destroy_workqueue(dev_priv->dp_wq);
	destroy_workqueue(dev_priv->wq);
	destroy_workqueue(dev_priv->gpu_error.hangcheck_wq);
	pm_qos_remove_request(&dev_priv->pm_qos);

	i915_global_gtt_cleanup(dev);

	intel_uncore_fini(dev);
	if (dev_priv->regs != NULL)
		pci_iounmap(dev->pdev, dev_priv->regs);

	if (dev_priv->slab)
		kmem_cache_destroy(dev_priv->slab);

	pci_dev_put(dev_priv->bridge_dev);
	kfree(dev_priv);

	return 0;
}
int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	int ret;

	ret = i915_gem_open(dev, file);
	if (ret)
		return ret;

	return 0;
}
/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited. In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
void i915_driver_lastclose(struct drm_device *dev)
{
	intel_fbdev_restore_mode(dev);
	vga_switcheroo_process_delayed_switch();
}
void i915_driver_preclose(struct drm_device *dev, struct drm_file *file)
{
	mutex_lock(&dev->struct_mutex);
	i915_gem_context_close(dev, file);
	i915_gem_release(dev, file);
	mutex_unlock(&dev->struct_mutex);

	intel_modeset_preclose(dev, file);
}
void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	if (file_priv && file_priv->bsd_ring)
		file_priv->bsd_ring = NULL;
	kfree(file_priv);
}
static int
i915_gem_reject_pin_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	return -ENODEV;
}
const struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_USERPTR, i915_gem_userptr_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_GETPARAM, i915_gem_context_getparam_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_SETPARAM, i915_gem_context_setparam_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
};

int i915_max_ioctl = ARRAY_SIZE(i915_ioctls);
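
/*
 * The DRM core dispatches device-specific ioctls through the table
 * above; the flags on each entry encode the permission checks
 * (DRM_AUTH, DRM_MASTER, DRM_ROOT_ONLY, ...) the core applies before
 * calling the handler, and DRM_RENDER_ALLOW marks ioctls that are also
 * usable on render nodes.
 */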
/*
 * This is really ugly: Because old userspace abused the linux agp interface to
 * manage the gtt, we need to claim that all intel devices are agp. For
 * otherwise the drm core refuses to initialize the agp support code.
 */
int i915_driver_device_is_agp(struct drm_device *dev)
{
	return 1;
}