/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/device.h>
#include <linux/acpi.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

#include <linux/console.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <drm/drm_crtc_helper.h>
static struct drm_driver driver;
#define GEN_DEFAULT_PIPEOFFSETS \
	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
			  PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
			   TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }

#define GEN_CHV_PIPEOFFSETS \
	.pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
			  CHV_PIPE_C_OFFSET }, \
	.trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
			   CHV_TRANSCODER_C_OFFSET, }, \
	.palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
			     CHV_PALETTE_C_OFFSET }

#define CURSOR_OFFSETS \
	.cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }

#define IVB_CURSOR_OFFSETS \
	.cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }
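
/*
 * Editorial illustration (not part of the original file): the macros above
 * expand to C99 designated initializers, so a hypothetical device info
 * definition such as
 *
 *	static const struct intel_device_info intel_example_info = {
 *		.gen = 4, .num_pipes = 2,
 *		GEN_DEFAULT_PIPEOFFSETS,
 *		CURSOR_OFFSETS,
 *	};
 *
 * picks up the shared pipe/transcoder/palette/cursor offset arrays without
 * repeating them per device. When the same member is initialized twice,
 * the last initializer wins, which the GEN7_FEATURES-based entries below
 * rely on to override individual fields.
 */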
static const struct intel_device_info intel_i830_info = {
	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_845g_info = {
	.gen = 2, .num_pipes = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i85x_info = {
	.gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i865g_info = {
	.gen = 2, .num_pipes = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i915g_info = {
	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i915gm_info = {
	.gen = 3, .is_mobile = 1, .num_pipes = 2,
	.cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i945g_info = {
	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i945gm_info = {
	.gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
	.has_hotplug = 1, .cursor_needs_physical = 1,
	.has_overlay = 1, .overlay_needs_physical = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i965g_info = {
	.gen = 4, .is_broadwater = 1, .num_pipes = 2,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_i965gm_info = {
	.gen = 4, .is_crestline = 1, .num_pipes = 2,
	.is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_g33_info = {
	.gen = 3, .is_g33 = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_g45_info = {
	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_gm45_info = {
	.gen = 4, .is_g4x = 1, .num_pipes = 2,
	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
	.has_pipe_cxsr = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_pineview_info = {
	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ironlake_d_info = {
	.gen = 5, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ironlake_m_info = {
	.gen = 5, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_sandybridge_d_info = {
	.gen = 6, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_sandybridge_m_info = {
	.gen = 6, .is_mobile = 1, .num_pipes = 2,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
#define GEN7_FEATURES  \
	.gen = 7, .num_pipes = 3, \
	.need_gfx_hws = 1, .has_hotplug = 1, \
	.has_fbc = 1, \
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
	.has_llc = 1
static const struct intel_device_info intel_ivybridge_d_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ivybridge_m_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	.is_mobile = 1,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_ivybridge_q_info = {
	GEN7_FEATURES,
	.is_ivybridge = 1,
	.num_pipes = 0, /* legal, last one wins */
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_valleyview_m_info = {
	GEN7_FEATURES,
	.is_mobile = 1,
	.num_pipes = 2,
	.is_valleyview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	.has_fbc = 0, /* legal, last one wins */
	.has_llc = 0, /* legal, last one wins */
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};

static const struct intel_device_info intel_valleyview_d_info = {
	GEN7_FEATURES,
	.num_pipes = 2,
	.is_valleyview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	.has_fbc = 0, /* legal, last one wins */
	.has_llc = 0, /* legal, last one wins */
	GEN_DEFAULT_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
static const struct intel_device_info intel_haswell_d_info = {
	GEN7_FEATURES,
	.is_haswell = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_haswell_m_info = {
	GEN7_FEATURES,
	.is_haswell = 1,
	.is_mobile = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};
static const struct intel_device_info intel_broadwell_d_info = {
	.gen = 8, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_broadwell_m_info = {
	.gen = 8, .is_mobile = 1, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_broadwell_gt3d_info = {
	.gen = 8, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};

static const struct intel_device_info intel_broadwell_gt3m_info = {
	.gen = 8, .is_mobile = 1, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
	GEN_DEFAULT_PIPEOFFSETS,
	IVB_CURSOR_OFFSETS,
};
static const struct intel_device_info intel_cherryview_info = {
	.gen = 8, .num_pipes = 3,
	.need_gfx_hws = 1, .has_hotplug = 1,
	.ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
	.is_valleyview = 1,
	.display_mmio_offset = VLV_DISPLAY_BASE,
	GEN_CHV_PIPEOFFSETS,
	CURSOR_OFFSETS,
};
/*
 * Make sure any device matches here are from most specific to most
 * general. For example, since the Quanta match is based on the subsystem
 * and subvendor IDs, we need it to come before the more general IVB
 * PCI ID matches, otherwise we'll use the wrong info struct above.
 */
#define INTEL_PCI_IDS \
	INTEL_I830_IDS(&intel_i830_info),	\
	INTEL_I845G_IDS(&intel_845g_info),	\
	INTEL_I85X_IDS(&intel_i85x_info),	\
	INTEL_I865G_IDS(&intel_i865g_info),	\
	INTEL_I915G_IDS(&intel_i915g_info),	\
	INTEL_I915GM_IDS(&intel_i915gm_info),	\
	INTEL_I945G_IDS(&intel_i945g_info),	\
	INTEL_I945GM_IDS(&intel_i945gm_info),	\
	INTEL_I965G_IDS(&intel_i965g_info),	\
	INTEL_G33_IDS(&intel_g33_info),		\
	INTEL_I965GM_IDS(&intel_i965gm_info),	\
	INTEL_GM45_IDS(&intel_gm45_info),	\
	INTEL_G45_IDS(&intel_g45_info),		\
	INTEL_PINEVIEW_IDS(&intel_pineview_info),	\
	INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info),	\
	INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info),	\
	INTEL_SNB_D_IDS(&intel_sandybridge_d_info),	\
	INTEL_SNB_M_IDS(&intel_sandybridge_m_info),	\
	INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \
	INTEL_IVB_M_IDS(&intel_ivybridge_m_info),	\
	INTEL_IVB_D_IDS(&intel_ivybridge_d_info),	\
	INTEL_HSW_D_IDS(&intel_haswell_d_info),	\
	INTEL_HSW_M_IDS(&intel_haswell_m_info),	\
	INTEL_VLV_M_IDS(&intel_valleyview_m_info),	\
	INTEL_VLV_D_IDS(&intel_valleyview_d_info),	\
	INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info),	\
	INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info),	\
	INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info),	\
	INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info),	\
	INTEL_CHV_IDS(&intel_cherryview_info)
static const struct pci_device_id pciidlist[] = {	/* aka */
	INTEL_PCI_IDS,
	{0, 0, 0}
};

#if defined(CONFIG_DRM_I915_KMS)
MODULE_DEVICE_TABLE(pci, pciidlist);
#endif
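
/*
 * Note (editorial): pciidlist is the zero-terminated match table handed to
 * the PCI core through i915_pci_driver below. MODULE_DEVICE_TABLE()
 * additionally exports the table as module aliases, so udev/modprobe can
 * autoload i915 as soon as a matching GPU is enumerated. It is only
 * exported in the KMS configuration, where i915 is meant to bind devices
 * by default.
 */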
void intel_detect_pch(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct pci_dev *pch = NULL;

	/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
	 * (which really amounts to a PCH but no South Display).
	 */
	if (INTEL_INFO(dev)->num_pipes == 0) {
		dev_priv->pch_type = PCH_NOP;
		return;
	}

	/*
	 * The reason to probe ISA bridge instead of Dev31:Fun0 is to
	 * make graphics device passthrough work easy for VMM, that only
	 * needs to expose ISA bridge to let driver know the real hardware
	 * underneath. This is a requirement from virtualization team.
	 *
	 * In some virtualized environments (e.g. XEN), there may be an
	 * irrelevant ISA bridge in the system. To work reliably, we should
	 * scan through all the ISA bridge devices and check for the first
	 * match, instead of only checking the first one.
	 */
	while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
		if (pch->vendor == PCI_VENDOR_ID_INTEL) {
			unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
			dev_priv->pch_id = id;

			if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_IBX;
				DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
				WARN_ON(!IS_GEN5(dev));
			} else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found CougarPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
				/* PantherPoint is CPT compatible */
				dev_priv->pch_type = PCH_CPT;
				DRM_DEBUG_KMS("Found PantherPoint PCH\n");
				WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
			} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint PCH\n");
				WARN_ON(!IS_HASWELL(dev));
				WARN_ON(IS_ULT(dev));
			} else if (IS_BROADWELL(dev)) {
				dev_priv->pch_type = PCH_LPT;
				dev_priv->pch_id =
					INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
				DRM_DEBUG_KMS("This is Broadwell, assuming "
					      "LynxPoint LP PCH\n");
			} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
				dev_priv->pch_type = PCH_LPT;
				DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
				WARN_ON(!IS_HASWELL(dev));
				WARN_ON(!IS_ULT(dev));
			} else
				continue;

			break;
		}
	}
	if (!pch)
		DRM_DEBUG_KMS("No PCH found.\n");

	pci_dev_put(pch);
}
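
/*
 * Editorial example (assumed helpers): the pch_type detected above is
 * normally consumed through convenience macros from i915_drv.h such as
 * HAS_PCH_IBX(), HAS_PCH_CPT() or HAS_PCH_LPT(), e.g.
 *
 *	if (HAS_PCH_CPT(dev))
 *		; // program CPT/PPT-specific FDI training parameters
 *
 * so most callers avoid comparing dev_priv->pch_type directly.
 */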
bool i915_semaphore_is_enabled(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen < 6)
		return false;

	if (i915.semaphores >= 0)
		return i915.semaphores;

	/* Until we get further testing... */
	if (IS_GEN8(dev))
		return false;

#ifdef CONFIG_INTEL_IOMMU
	/* Enable semaphores on SNB when IO remapping is off */
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
		return false;
#endif

	return true;
}
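
/*
 * Note (editorial): i915.semaphores is a tri-state module parameter:
 * negative means "auto" (fall through to the per-platform defaults above),
 * while 0/1 force semaphores off/on, which is why the early return above
 * only triggers for values >= 0.
 */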
static int i915_drm_freeze(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	pci_power_t opregion_target_state;

	intel_runtime_pm_get(dev_priv);

	/* ignore lid events during suspend */
	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_SUSPENDED;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	/* We do a lot of poking in a lot of registers, make sure they work
	 * properly. */
	intel_display_set_init_power(dev_priv, true);

	drm_kms_helper_poll_disable(dev);

	pci_save_state(dev->pdev);

	/* If KMS is active, we do the leavevt stuff here */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		int error;

		error = i915_gem_suspend(dev);
		if (error) {
			dev_err(&dev->pdev->dev,
				"GEM idle failed, resume might fail\n");
			return error;
		}

		drm_irq_uninstall(dev);
		dev_priv->enable_hotplug_processing = false;

		intel_suspend_gt_powersave(dev);

		/*
		 * Disable CRTCs directly since we want to preserve sw state
		 * for _thaw.
		 */
		drm_modeset_lock_all(dev);
		for_each_crtc(dev, crtc) {
			dev_priv->display.crtc_disable(crtc);
		}
		drm_modeset_unlock_all(dev);

		intel_modeset_suspend_hw(dev);
	}

	i915_gem_suspend_gtt_mappings(dev);

	i915_save_state(dev);

	if (acpi_target_system_state() >= ACPI_STATE_S3)
		opregion_target_state = PCI_D3cold;
	else
		opregion_target_state = PCI_D1;
	intel_opregion_notify_adapter(dev, opregion_target_state);

	intel_uncore_forcewake_reset(dev, false);
	intel_opregion_fini(dev);

	console_lock();
	intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
	console_unlock();

	dev_priv->suspend_count++;

	intel_display_set_init_power(dev_priv, false);

	return 0;
}
int i915_suspend(struct drm_device *dev, pm_message_t state)
{
	int error;

	if (!dev || !dev->dev_private) {
		DRM_ERROR("dev: %p\n", dev);
		DRM_ERROR("DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (state.event == PM_EVENT_PRETHAW)
		return 0;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	error = i915_drm_freeze(dev);
	if (error)
		return error;

	if (state.event == PM_EVENT_SUSPEND) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}

	return 0;
}
void intel_console_resume(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private,
			     console_resume_work);
	struct drm_device *dev = dev_priv->dev;

	console_lock();
	intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
	console_unlock();
}
static int i915_drm_thaw_early(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hsw_disable_pc8(dev_priv);

	intel_uncore_early_sanitize(dev, true);
	intel_uncore_sanitize(dev);
	intel_power_domains_init_hw(dev_priv);

	return 0;
}
static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET) &&
	    restore_gtt_mappings) {
		mutex_lock(&dev->struct_mutex);
		i915_gem_restore_gtt_mappings(dev);
		mutex_unlock(&dev->struct_mutex);
	}

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	/* KMS EnterVT equivalent */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_init_pch_refclk(dev);
		drm_mode_config_reset(dev);

		mutex_lock(&dev->struct_mutex);
		if (i915_gem_init_hw(dev)) {
			DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
			atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
		}
		mutex_unlock(&dev->struct_mutex);

		/* We need working interrupts for modeset enabling ... */
		drm_irq_install(dev, dev->pdev->irq);

		intel_modeset_init_hw(dev);

		drm_modeset_lock_all(dev);
		intel_modeset_setup_hw_state(dev, true);
		drm_modeset_unlock_all(dev);

		/*
		 * ... but also need to make sure that hotplug processing
		 * doesn't cause havoc. Like in the driver load code we don't
		 * bother with the tiny race here where we might lose hotplug
		 * notifications.
		 */
		intel_hpd_init(dev);
		dev_priv->enable_hotplug_processing = true;
		/* Config may have changed between suspend and resume */
		drm_helper_hpd_irq_event(dev);
	}

	intel_opregion_init(dev);

	/*
	 * The console lock can be pretty contended on resume due
	 * to all the printk activity. Try to keep it out of the hot
	 * path of resume if possible.
	 */
	if (console_trylock()) {
		intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
		console_unlock();
	} else {
		schedule_work(&dev_priv->console_resume_work);
	}

	mutex_lock(&dev_priv->modeset_restore_lock);
	dev_priv->modeset_restore = MODESET_DONE;
	mutex_unlock(&dev_priv->modeset_restore_lock);

	intel_opregion_notify_adapter(dev, PCI_D0);

	intel_runtime_pm_put(dev_priv);
	return 0;
}
static int i915_drm_thaw(struct drm_device *dev)
{
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		i915_check_and_clear_faults(dev);

	return __i915_drm_thaw(dev, true);
}
static int i915_resume_early(struct drm_device *dev)
{
	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	/*
	 * We have a resume ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with an early
	 * resume hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (pci_enable_device(dev->pdev))
		return -EIO;

	pci_set_master(dev->pdev);

	return i915_drm_thaw_early(dev);
}
int i915_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/*
	 * Platforms with opregion should have sane BIOS, older ones (gen3 and
	 * earlier) need to restore the GTT mappings since the BIOS might clear
	 * all our scratch PTEs.
	 */
	ret = __i915_drm_thaw(dev, !dev_priv->opregion.header);
	if (ret)
		return ret;

	drm_kms_helper_poll_enable(dev);
	return 0;
}

static int i915_resume_legacy(struct drm_device *dev)
{
	i915_resume_early(dev);
	i915_resume(dev);

	return 0;
}
/**
 * i915_reset - reset chip after a hang
 * @dev: drm device to reset
 *
 * Reset the chip. Useful if a hang is detected. Returns zero on successful
 * reset or otherwise an error code.
 *
 * Procedure is fairly simple:
 *   - reset the chip using the reset reg
 *   - re-init context state
 *   - re-init hardware status page
 *   - re-init ring buffer
 *   - re-init interrupt state
 */
int i915_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool simulated;
	int ret;

	mutex_lock(&dev->struct_mutex);

	i915_gem_reset(dev);

	simulated = dev_priv->gpu_error.stop_rings != 0;

	ret = intel_gpu_reset(dev);

	/* Also reset the gpu hangman. */
	if (simulated) {
		DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
		dev_priv->gpu_error.stop_rings = 0;
		if (ret == -ENODEV) {
			DRM_INFO("Reset not implemented, but ignoring "
				 "error for simulated gpu hangs\n");
			ret = 0;
		}
	}

	if (ret) {
		DRM_ERROR("Failed to reset chip: %i\n", ret);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	/* Ok, now get things going again... */

	/*
	 * Everything depends on having the GTT running, so we need to start
	 * there. Fortunately we don't need to do this unless we reset the
	 * chip at a PCI level.
	 *
	 * Next we need to restore the context, but we don't use those
	 * yet either...
	 *
	 * Ring buffer needs to be re-initialized in the KMS case, or if X
	 * was running at the time of the reset (i.e. we weren't VT
	 * switched away).
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET) ||
			!dev_priv->ums.mm_suspended) {
		dev_priv->ums.mm_suspended = 0;

		ret = i915_gem_init_hw(dev);
		mutex_unlock(&dev->struct_mutex);
		if (ret) {
			DRM_ERROR("Failed hw init on reset %d\n", ret);
			return ret;
		}

		/*
		 * FIXME: This races pretty badly against concurrent holders of
		 * ring interrupts. This is possible since we've started to drop
		 * dev->struct_mutex in select places when waiting for the gpu.
		 */

		/*
		 * rps/rc6 re-init is necessary to restore state lost after the
		 * reset and the re-install of gt irqs. Skip for ironlake per
		 * previous concerns that it doesn't respond well to some forms
		 * of re-init after reset.
		 */
		if (INTEL_INFO(dev)->gen > 5)
			intel_reset_gt_powersave(dev);
	} else {
		mutex_unlock(&dev->struct_mutex);
	}

	return 0;
}
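
/*
 * Note (editorial): gpu_error.stop_rings is non-zero only for simulated
 * hangs injected for testing (typically via debugfs), which is why a reset
 * request with stop_rings set clears the mask and tolerates -ENODEV from
 * intel_gpu_reset() instead of failing the reset.
 */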
static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct intel_device_info *intel_info =
		(struct intel_device_info *) ent->driver_data;

	if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
		DRM_INFO("This hardware requires preliminary hardware support.\n"
			 "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
		return -ENODEV;
	}

	/* Only bind to function 0 of the device. Early generations
	 * used function 1 as a placeholder for multi-head. This causes
	 * us confusion instead, especially on the systems where both
	 * functions have the same PCI-ID!
	 */
	if (PCI_FUNC(pdev->devfn))
		return -ENODEV;

	driver.driver_features &= ~(DRIVER_USE_AGP);

	return drm_get_pci_dev(pdev, ent, &driver);
}

static void
i915_pci_remove(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);

	drm_put_dev(dev);
}
static int i915_pm_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	if (!drm_dev || !drm_dev->dev_private) {
		dev_err(dev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	return i915_drm_freeze(drm_dev);
}

static int i915_pm_suspend_late(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = drm_dev->dev_private;

	/*
	 * We have a suspend ordering issue with the snd-hda driver also
	 * requiring our device to be powered up. Due to the lack of a
	 * parent/child relationship we currently solve this with a late
	 * suspend hook.
	 *
	 * FIXME: This should be solved with a special hdmi sink device or
	 * similar so that power domains can be employed.
	 */
	if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	if (IS_HASWELL(drm_dev) || IS_BROADWELL(drm_dev))
		hsw_enable_pc8(dev_priv);

	pci_disable_device(pdev);
	pci_set_power_state(pdev, PCI_D3hot);

	return 0;
}
static int i915_pm_resume_early(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	return i915_resume_early(drm_dev);
}

static int i915_pm_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	return i915_resume(drm_dev);
}

static int i915_pm_freeze(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	if (!drm_dev || !drm_dev->dev_private) {
		dev_err(dev, "DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	return i915_drm_freeze(drm_dev);
}

static int i915_pm_thaw_early(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	return i915_drm_thaw_early(drm_dev);
}

static int i915_pm_thaw(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	return i915_drm_thaw(drm_dev);
}

static int i915_pm_poweroff(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct drm_device *drm_dev = pci_get_drvdata(pdev);

	return i915_drm_freeze(drm_dev);
}
static int hsw_runtime_suspend(struct drm_i915_private *dev_priv)
{
	hsw_enable_pc8(dev_priv);

	return 0;
}

static int snb_runtime_resume(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	intel_init_pch_refclk(dev);

	return 0;
}

static int hsw_runtime_resume(struct drm_i915_private *dev_priv)
{
	hsw_disable_pc8(dev_priv);

	return 0;
}
/*
 * Save all Gunit registers that may be lost after a D3 and a subsequent
 * S0i[R123] transition. The list of registers needing a save/restore is
 * defined in the VLV2_S0IXRegs document. This document marks all Gunit
 * registers in the following way:
 * - Driver: saved/restored by the driver
 * - Punit : saved/restored by the Punit firmware
 * - No, w/o marking: no need to save/restore, since the register is R/O or
 *                    used internally by the HW in a way that doesn't depend
 *                    on keeping the content across a suspend/resume.
 * - Debug : used for debugging
 *
 * We save/restore all registers marked with 'Driver', with the following
 * exceptions:
 * - Registers out of use, including also registers marked with 'Debug'.
 *   These have no effect on the driver's operation, so we don't save/restore
 *   them to reduce the overhead.
 * - Registers that are fully setup by an initialization function called from
 *   the resume path. For example many clock gating and RPS/RC6 registers.
 * - Registers that provide the right functionality with their reset defaults.
 *
 * TODO: Except for registers that based on the above 3 criteria can be safely
 * ignored, we save/restore all others, practically treating the HW context as
 * a black-box for the driver. Further investigation is needed to reduce the
 * saved/restored registers even further, by following the same 3 criteria.
 */
static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
	int i;

	/* GAM 0x4000-0x4770 */
	s->wr_watermark = I915_READ(GEN7_WR_WATERMARK);
	s->gfx_prio_ctrl = I915_READ(GEN7_GFX_PRIO_CTRL);
	s->arb_mode = I915_READ(ARB_MODE);
	s->gfx_pend_tlb0 = I915_READ(GEN7_GFX_PEND_TLB0);
	s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1);

	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS_BASE + i * 4);

	s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
	s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT);

	s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7);
	s->ecochk = I915_READ(GAM_ECOCHK);
	s->bsd_hwsp = I915_READ(BSD_HWS_PGA_GEN7);
	s->blt_hwsp = I915_READ(BLT_HWS_PGA_GEN7);

	s->tlb_rd_addr = I915_READ(GEN7_TLB_RD_ADDR);

	/* MBC 0x9024-0x91D0, 0x8500 */
	s->g3dctl = I915_READ(VLV_G3DCTL);
	s->gsckgctl = I915_READ(VLV_GSCKGCTL);
	s->mbctl = I915_READ(GEN6_MBCTL);

	/* GCP 0x9400-0x9424, 0x8100-0x810C */
	s->ucgctl1 = I915_READ(GEN6_UCGCTL1);
	s->ucgctl3 = I915_READ(GEN6_UCGCTL3);
	s->rcgctl1 = I915_READ(GEN6_RCGCTL1);
	s->rcgctl2 = I915_READ(GEN6_RCGCTL2);
	s->rstctl = I915_READ(GEN6_RSTCTL);
	s->misccpctl = I915_READ(GEN7_MISCCPCTL);

	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
	s->gfxpause = I915_READ(GEN6_GFXPAUSE);
	s->rpdeuhwtc = I915_READ(GEN6_RPDEUHWTC);
	s->rpdeuc = I915_READ(GEN6_RPDEUC);
	s->ecobus = I915_READ(ECOBUS);
	s->pwrdwnupctl = I915_READ(VLV_PWRDWNUPCTL);
	s->rp_down_timeout = I915_READ(GEN6_RP_DOWN_TIMEOUT);
	s->rp_deucsw = I915_READ(GEN6_RPDEUCSW);
	s->rcubmabdtmr = I915_READ(GEN6_RCUBMABDTMR);
	s->rcedata = I915_READ(VLV_RCEDATA);
	s->spare2gh = I915_READ(VLV_SPAREG2H);

	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
	s->gt_imr = I915_READ(GTIMR);
	s->gt_ier = I915_READ(GTIER);
	s->pm_imr = I915_READ(GEN6_PMIMR);
	s->pm_ier = I915_READ(GEN6_PMIER);

	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH_BASE + i * 4);

	/* GT SA CZ domain, 0x100000-0x138124 */
	s->tilectl = I915_READ(TILECTL);
	s->gt_fifoctl = I915_READ(GTFIFOCTL);
	s->gtlc_wake_ctrl = I915_READ(VLV_GTLC_WAKE_CTRL);
	s->gtlc_survive = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	s->pmwgicz = I915_READ(VLV_PMWGICZ);

	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
	s->gu_ctl0 = I915_READ(VLV_GU_CTL0);
	s->gu_ctl1 = I915_READ(VLV_GU_CTL1);
	s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2);

	/*
	 * Not saving any of:
	 * DFT,		0x9800-0x9EC0
	 * SARB,	0xB000-0xB1FC
	 * GAC,		0x5208-0x524C, 0x14000-0x14C000
	 * PCI CFG
	 */
}
static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
{
	struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
	u32 val;
	int i;

	/* GAM 0x4000-0x4770 */
	I915_WRITE(GEN7_WR_WATERMARK, s->wr_watermark);
	I915_WRITE(GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl);
	I915_WRITE(ARB_MODE, s->arb_mode | (0xffff << 16));
	I915_WRITE(GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0);
	I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1);

	for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
		I915_WRITE(GEN7_LRA_LIMITS_BASE + i * 4, s->lra_limits[i]);

	I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
	I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);

	I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp);
	I915_WRITE(GAM_ECOCHK, s->ecochk);
	I915_WRITE(BSD_HWS_PGA_GEN7, s->bsd_hwsp);
	I915_WRITE(BLT_HWS_PGA_GEN7, s->blt_hwsp);

	I915_WRITE(GEN7_TLB_RD_ADDR, s->tlb_rd_addr);

	/* MBC 0x9024-0x91D0, 0x8500 */
	I915_WRITE(VLV_G3DCTL, s->g3dctl);
	I915_WRITE(VLV_GSCKGCTL, s->gsckgctl);
	I915_WRITE(GEN6_MBCTL, s->mbctl);

	/* GCP 0x9400-0x9424, 0x8100-0x810C */
	I915_WRITE(GEN6_UCGCTL1, s->ucgctl1);
	I915_WRITE(GEN6_UCGCTL3, s->ucgctl3);
	I915_WRITE(GEN6_RCGCTL1, s->rcgctl1);
	I915_WRITE(GEN6_RCGCTL2, s->rcgctl2);
	I915_WRITE(GEN6_RSTCTL, s->rstctl);
	I915_WRITE(GEN7_MISCCPCTL, s->misccpctl);

	/* GPM 0xA000-0xAA84, 0x8000-0x80FC */
	I915_WRITE(GEN6_GFXPAUSE, s->gfxpause);
	I915_WRITE(GEN6_RPDEUHWTC, s->rpdeuhwtc);
	I915_WRITE(GEN6_RPDEUC, s->rpdeuc);
	I915_WRITE(ECOBUS, s->ecobus);
	I915_WRITE(VLV_PWRDWNUPCTL, s->pwrdwnupctl);
	I915_WRITE(GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout);
	I915_WRITE(GEN6_RPDEUCSW, s->rp_deucsw);
	I915_WRITE(GEN6_RCUBMABDTMR, s->rcubmabdtmr);
	I915_WRITE(VLV_RCEDATA, s->rcedata);
	I915_WRITE(VLV_SPAREG2H, s->spare2gh);

	/* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
	I915_WRITE(GTIMR, s->gt_imr);
	I915_WRITE(GTIER, s->gt_ier);
	I915_WRITE(GEN6_PMIMR, s->pm_imr);
	I915_WRITE(GEN6_PMIER, s->pm_ier);

	for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
		I915_WRITE(GEN7_GT_SCRATCH_BASE + i * 4, s->gt_scratch[i]);

	/* GT SA CZ domain, 0x100000-0x138124 */
	I915_WRITE(TILECTL, s->tilectl);
	I915_WRITE(GTFIFOCTL, s->gt_fifoctl);

	/*
	 * Preserve the GT allow wake and GFX force clock bit, they are not
	 * to be restored, as they are used to control the s0ix suspend/resume
	 * sequence by the caller.
	 */
	val = I915_READ(VLV_GTLC_WAKE_CTRL);
	val &= VLV_GTLC_ALLOWWAKEREQ;
	val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);

	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	val &= VLV_GFX_CLK_FORCE_ON_BIT;
	val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

	I915_WRITE(VLV_PMWGICZ, s->pmwgicz);

	/* Gunit-Display CZ domain, 0x182028-0x1821CF */
	I915_WRITE(VLV_GU_CTL0, s->gu_ctl0);
	I915_WRITE(VLV_GU_CTL1, s->gu_ctl1);
	I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2);
}
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
{
	u32 val;
	int err;

	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	WARN_ON(!!(val & VLV_GFX_CLK_FORCE_ON_BIT) == force_on);

#define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT)
	/* Wait for a previous force-off to settle */
	if (force_on) {
		err = wait_for(!COND, 20);
		if (err) {
			DRM_ERROR("timeout waiting for GFX clock force-off (%08x)\n",
				  I915_READ(VLV_GTLC_SURVIVABILITY_REG));
			return err;
		}
	}

	val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
	val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
	if (force_on)
		val |= VLV_GFX_CLK_FORCE_ON_BIT;
	I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);

	if (!force_on)
		return 0;

	err = wait_for(COND, 20);
	if (err)
		DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
			  I915_READ(VLV_GTLC_SURVIVABILITY_REG));

	return err;
#undef COND
}
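
/*
 * Note (editorial): the local COND #define feeds the driver's wait_for()
 * polling macro, which re-evaluates the condition until it holds or the
 * timeout (in milliseconds) expires, returning -ETIMEDOUT on failure.
 * Defining COND next to its single user and #undef-ing it afterwards keeps
 * the register-polling expression readable without leaking the name.
 */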
static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
{
	u32 val;
	int err;

	val = I915_READ(VLV_GTLC_WAKE_CTRL);
	val &= ~VLV_GTLC_ALLOWWAKEREQ;
	if (allow)
		val |= VLV_GTLC_ALLOWWAKEREQ;
	I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
	POSTING_READ(VLV_GTLC_WAKE_CTRL);

#define COND (!!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEACK) == \
	      allow)
	err = wait_for(COND, 1);
	if (err)
		DRM_ERROR("timeout disabling GT waking\n");
	return err;
#undef COND
}
static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
				 bool wait_for_on)
{
	u32 mask;
	u32 val;
	int err;

	mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
	val = wait_for_on ? mask : 0;
#define COND ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
	if (COND)
		return 0;

	DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
		      wait_for_on ? "on" : "off",
		      I915_READ(VLV_GTLC_PW_STATUS));

	/*
	 * RC6 transitioning can be delayed up to 2 msec (see
	 * valleyview_enable_rps), use 3 msec for safety.
	 */
	err = wait_for(COND, 3);
	if (err)
		DRM_ERROR("timeout waiting for GT wells to go %s\n",
			  wait_for_on ? "on" : "off");

	return err;
#undef COND
}
static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
{
	if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
		return;

	DRM_ERROR("GT register access while GT waking disabled\n");
	I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
}
static int vlv_runtime_suspend(struct drm_i915_private *dev_priv)
{
	u32 mask;
	int err;

	/*
	 * Bspec defines the following GT well on flags as debug only, so
	 * don't treat them as hard failures.
	 */
	(void)vlv_wait_for_gt_wells(dev_priv, false);

	mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
	WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);

	vlv_check_no_gt_access(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, true);
	if (err)
		goto err1;

	err = vlv_allow_gt_wake(dev_priv, false);
	if (err)
		goto err2;
	vlv_save_gunit_s0ix_state(dev_priv);

	err = vlv_force_gfx_clock(dev_priv, false);
	if (err)
		goto err2;

	return 0;

err2:
	/* For safety always re-enable waking and disable gfx clock forcing */
	vlv_allow_gt_wake(dev_priv, true);
err1:
	vlv_force_gfx_clock(dev_priv, false);

	return err;
}
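
/*
 * Note (editorial): the suspend sequence above is ordered deliberately:
 * the GFX clock is forced on so the Gunit registers can be read reliably,
 * GT wake requests are then blocked so the saved state cannot change
 * underneath us, the state is snapshotted, and only then is the clock
 * forcing released. vlv_runtime_resume() below mirrors the same steps in
 * the opposite direction.
 */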
static int vlv_runtime_resume(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	int err;
	int ret;

	/*
	 * If any of the steps fail just try to continue, that's the best we
	 * can do at this point. Return the first error code (which will also
	 * leave RPM permanently disabled).
	 */
	ret = vlv_force_gfx_clock(dev_priv, true);

	vlv_restore_gunit_s0ix_state(dev_priv);

	err = vlv_allow_gt_wake(dev_priv, true);
	if (!ret)
		ret = err;

	err = vlv_force_gfx_clock(dev_priv, false);
	if (!ret)
		ret = err;

	vlv_check_no_gt_access(dev_priv);

	intel_init_clock_gating(dev);
	i915_gem_restore_fences(dev);

	return ret;
}
static int intel_runtime_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
		return -ENODEV;

	WARN_ON(!HAS_RUNTIME_PM(dev));
	assert_force_wake_inactive(dev_priv);

	DRM_DEBUG_KMS("Suspending device\n");

	/*
	 * We could deadlock here in case another thread holding struct_mutex
	 * calls RPM suspend concurrently, since the RPM suspend will wait
	 * first for this RPM suspend to finish. In this case the concurrent
	 * RPM resume will be followed by its RPM suspend counterpart. Still
	 * for consistency return -EAGAIN, which will reschedule this suspend.
	 */
	if (!mutex_trylock(&dev->struct_mutex)) {
		DRM_DEBUG_KMS("device lock contention, deferring suspend\n");
		/*
		 * Bump the expiration timestamp, otherwise the suspend won't
		 * be rescheduled.
		 */
		pm_runtime_mark_last_busy(device);

		return -EAGAIN;
	}
	/*
	 * We are safe here against re-faults, since the fault handler takes
	 * an RPM reference.
	 */
	i915_gem_release_all_mmaps(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	/*
	 * rps.work can't be rearmed here, since we get here only after making
	 * sure the GPU is idle and the RPS freq is set to the minimum. See
	 * intel_mark_idle().
	 */
	cancel_work_sync(&dev_priv->rps.work);
	intel_runtime_pm_disable_interrupts(dev);

	if (IS_GEN6(dev)) {
		ret = 0;
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		ret = hsw_runtime_suspend(dev_priv);
	} else if (IS_VALLEYVIEW(dev)) {
		ret = vlv_runtime_suspend(dev_priv);
	} else {
		ret = -ENODEV;
		WARN_ON(1);
	}

	if (ret) {
		DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
		intel_runtime_pm_restore_interrupts(dev);

		return ret;
	}

	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
	dev_priv->pm.suspended = true;

	/*
	 * current versions of firmware which depend on this opregion
	 * notification have repurposed the D1 definition to mean
	 * "runtime suspended" vs. what you would normally expect (D3)
	 * to distinguish it from notifications that might be sent
	 * via the suspend path.
	 */
	intel_opregion_notify_adapter(dev, PCI_D1);

	DRM_DEBUG_KMS("Device suspended\n");
	return 0;
}
static int intel_runtime_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct drm_device *dev = pci_get_drvdata(pdev);
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	WARN_ON(!HAS_RUNTIME_PM(dev));

	DRM_DEBUG_KMS("Resuming device\n");

	intel_opregion_notify_adapter(dev, PCI_D0);
	dev_priv->pm.suspended = false;

	if (IS_GEN6(dev)) {
		ret = snb_runtime_resume(dev_priv);
	} else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		ret = hsw_runtime_resume(dev_priv);
	} else if (IS_VALLEYVIEW(dev)) {
		ret = vlv_runtime_resume(dev_priv);
	} else {
		WARN_ON(1);
		ret = -ENODEV;
	}

	/*
	 * No point of rolling back things in case of an error, as the best
	 * we can do is to hope that things will still work (and disable RPM).
	 */
	i915_gem_init_swizzling(dev);
	gen6_update_ring_freq(dev);

	intel_runtime_pm_restore_interrupts(dev);
	intel_reset_gt_powersave(dev);

	if (ret)
		DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
	else
		DRM_DEBUG_KMS("Device resumed\n");

	return ret;
}
static const struct dev_pm_ops i915_pm_ops = {
	.suspend = i915_pm_suspend,
	.suspend_late = i915_pm_suspend_late,
	.resume_early = i915_pm_resume_early,
	.resume = i915_pm_resume,
	.freeze = i915_pm_freeze,
	.thaw_early = i915_pm_thaw_early,
	.thaw = i915_pm_thaw,
	.poweroff = i915_pm_poweroff,
	.restore_early = i915_pm_resume_early,
	.restore = i915_pm_resume,
	.runtime_suspend = intel_runtime_suspend,
	.runtime_resume = intel_runtime_resume,
};
static const struct vm_operations_struct i915_gem_vm_ops = {
	.fault = i915_gem_fault,
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};
static const struct file_operations i915_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = drm_gem_mmap,
	.poll = drm_poll,
	.read = drm_read,
#ifdef CONFIG_COMPAT
	.compat_ioctl = i915_compat_ioctl,
#endif
	.llseek = noop_llseek,
};
static struct drm_driver driver = {
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_USE_AGP |
	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
	    DRIVER_RENDER,
	.load = i915_driver_load,
	.unload = i915_driver_unload,
	.open = i915_driver_open,
	.lastclose = i915_driver_lastclose,
	.preclose = i915_driver_preclose,
	.postclose = i915_driver_postclose,

	/* Used in place of i915_pm_ops for non-DRIVER_MODESET */
	.suspend = i915_suspend,
	.resume = i915_resume_legacy,

	.device_is_agp = i915_driver_device_is_agp,
	.master_create = i915_master_create,
	.master_destroy = i915_master_destroy,
#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = i915_debugfs_init,
	.debugfs_cleanup = i915_debugfs_cleanup,
#endif
	.gem_free_object = i915_gem_free_object,
	.gem_vm_ops = &i915_gem_vm_ops,

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = i915_gem_prime_export,
	.gem_prime_import = i915_gem_prime_import,

	.dumb_create = i915_gem_dumb_create,
	.dumb_map_offset = i915_gem_mmap_gtt,
	.dumb_destroy = drm_gem_dumb_destroy,
	.ioctls = i915_ioctls,
	.fops = &i915_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};
static struct pci_driver i915_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = pciidlist,
	.probe = i915_pci_probe,
	.remove = i915_pci_remove,
	.driver.pm = &i915_pm_ops,
};
static int __init i915_init(void)
{
	driver.num_ioctls = i915_max_ioctl;

	/*
	 * If CONFIG_DRM_I915_KMS is set, default to KMS unless
	 * explicitly disabled with the module parameter.
	 *
	 * Otherwise, just follow the parameter (defaulting to off).
	 *
	 * Allow optional vga_text_mode_force boot option to override
	 * the default behavior.
	 */
#if defined(CONFIG_DRM_I915_KMS)
	if (i915.modeset != 0)
		driver.driver_features |= DRIVER_MODESET;
#endif
	if (i915.modeset == 1)
		driver.driver_features |= DRIVER_MODESET;

#ifdef CONFIG_VGA_CONSOLE
	if (vgacon_text_force() && i915.modeset == -1)
		driver.driver_features &= ~DRIVER_MODESET;
#endif

	if (!(driver.driver_features & DRIVER_MODESET)) {
		driver.get_vblank_timestamp = NULL;
#ifndef CONFIG_DRM_I915_UMS
		/* Silently fail loading to not upset userspace. */
		DRM_DEBUG_DRIVER("KMS and UMS disabled.\n");
		return 0;
#endif
	}

	return drm_pci_init(&driver, &i915_pci_driver);
}
static void __exit i915_exit(void)
{
#ifndef CONFIG_DRM_I915_UMS
	if (!(driver.driver_features & DRIVER_MODESET))
		return; /* Never loaded a driver. */
#endif

	drm_pci_exit(&driver, &i915_pci_driver);
}
module_init(i915_init);
module_exit(i915_exit);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");