drm/i915: Mark device as wedged if we fail to resume
drivers/gpu/drm/i915/i915_drv.c
1 /* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
2 */
3 /*
4 *
5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the
17 * next paragraph) shall be included in all copies or substantial portions
18 * of the Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
21 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
23 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
24 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27 *
28 */
29
30 #include <linux/device.h>
31 #include <drm/drmP.h>
32 #include <drm/i915_drm.h>
33 #include "i915_drv.h"
34 #include "i915_trace.h"
35 #include "intel_drv.h"
36
37 #include <linux/console.h>
38 #include <linux/module.h>
39 #include <drm/drm_crtc_helper.h>
40
41 static struct drm_driver driver;
42
43 #define GEN_DEFAULT_PIPEOFFSETS \
44 .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
45 PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
46 .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
47 TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
48 .dpll_offsets = { DPLL_A_OFFSET, DPLL_B_OFFSET }, \
49 .dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET }, \
50 .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
51
52
53 static const struct intel_device_info intel_i830_info = {
54 .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
55 .has_overlay = 1, .overlay_needs_physical = 1,
56 .ring_mask = RENDER_RING,
57 GEN_DEFAULT_PIPEOFFSETS,
58 };
59
60 static const struct intel_device_info intel_845g_info = {
61 .gen = 2, .num_pipes = 1,
62 .has_overlay = 1, .overlay_needs_physical = 1,
63 .ring_mask = RENDER_RING,
64 GEN_DEFAULT_PIPEOFFSETS,
65 };
66
67 static const struct intel_device_info intel_i85x_info = {
68 .gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
69 .cursor_needs_physical = 1,
70 .has_overlay = 1, .overlay_needs_physical = 1,
71 .has_fbc = 1,
72 .ring_mask = RENDER_RING,
73 GEN_DEFAULT_PIPEOFFSETS,
74 };
75
76 static const struct intel_device_info intel_i865g_info = {
77 .gen = 2, .num_pipes = 1,
78 .has_overlay = 1, .overlay_needs_physical = 1,
79 .ring_mask = RENDER_RING,
80 GEN_DEFAULT_PIPEOFFSETS,
81 };
82
83 static const struct intel_device_info intel_i915g_info = {
84 .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
85 .has_overlay = 1, .overlay_needs_physical = 1,
86 .ring_mask = RENDER_RING,
87 GEN_DEFAULT_PIPEOFFSETS,
88 };
89 static const struct intel_device_info intel_i915gm_info = {
90 .gen = 3, .is_mobile = 1, .num_pipes = 2,
91 .cursor_needs_physical = 1,
92 .has_overlay = 1, .overlay_needs_physical = 1,
93 .supports_tv = 1,
94 .has_fbc = 1,
95 .ring_mask = RENDER_RING,
96 GEN_DEFAULT_PIPEOFFSETS,
97 };
98 static const struct intel_device_info intel_i945g_info = {
99 .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
100 .has_overlay = 1, .overlay_needs_physical = 1,
101 .ring_mask = RENDER_RING,
102 GEN_DEFAULT_PIPEOFFSETS,
103 };
104 static const struct intel_device_info intel_i945gm_info = {
105 .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
106 .has_hotplug = 1, .cursor_needs_physical = 1,
107 .has_overlay = 1, .overlay_needs_physical = 1,
108 .supports_tv = 1,
109 .has_fbc = 1,
110 .ring_mask = RENDER_RING,
111 GEN_DEFAULT_PIPEOFFSETS,
112 };
113
114 static const struct intel_device_info intel_i965g_info = {
115 .gen = 4, .is_broadwater = 1, .num_pipes = 2,
116 .has_hotplug = 1,
117 .has_overlay = 1,
118 .ring_mask = RENDER_RING,
119 GEN_DEFAULT_PIPEOFFSETS,
120 };
121
122 static const struct intel_device_info intel_i965gm_info = {
123 .gen = 4, .is_crestline = 1, .num_pipes = 2,
124 .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
125 .has_overlay = 1,
126 .supports_tv = 1,
127 .ring_mask = RENDER_RING,
128 GEN_DEFAULT_PIPEOFFSETS,
129 };
130
131 static const struct intel_device_info intel_g33_info = {
132 .gen = 3, .is_g33 = 1, .num_pipes = 2,
133 .need_gfx_hws = 1, .has_hotplug = 1,
134 .has_overlay = 1,
135 .ring_mask = RENDER_RING,
136 GEN_DEFAULT_PIPEOFFSETS,
137 };
138
139 static const struct intel_device_info intel_g45_info = {
140 .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
141 .has_pipe_cxsr = 1, .has_hotplug = 1,
142 .ring_mask = RENDER_RING | BSD_RING,
143 GEN_DEFAULT_PIPEOFFSETS,
144 };
145
146 static const struct intel_device_info intel_gm45_info = {
147 .gen = 4, .is_g4x = 1, .num_pipes = 2,
148 .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
149 .has_pipe_cxsr = 1, .has_hotplug = 1,
150 .supports_tv = 1,
151 .ring_mask = RENDER_RING | BSD_RING,
152 GEN_DEFAULT_PIPEOFFSETS,
153 };
154
155 static const struct intel_device_info intel_pineview_info = {
156 .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
157 .need_gfx_hws = 1, .has_hotplug = 1,
158 .has_overlay = 1,
159 GEN_DEFAULT_PIPEOFFSETS,
160 };
161
162 static const struct intel_device_info intel_ironlake_d_info = {
163 .gen = 5, .num_pipes = 2,
164 .need_gfx_hws = 1, .has_hotplug = 1,
165 .ring_mask = RENDER_RING | BSD_RING,
166 GEN_DEFAULT_PIPEOFFSETS,
167 };
168
169 static const struct intel_device_info intel_ironlake_m_info = {
170 .gen = 5, .is_mobile = 1, .num_pipes = 2,
171 .need_gfx_hws = 1, .has_hotplug = 1,
172 .has_fbc = 1,
173 .ring_mask = RENDER_RING | BSD_RING,
174 GEN_DEFAULT_PIPEOFFSETS,
175 };
176
177 static const struct intel_device_info intel_sandybridge_d_info = {
178 .gen = 6, .num_pipes = 2,
179 .need_gfx_hws = 1, .has_hotplug = 1,
180 .has_fbc = 1,
181 .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
182 .has_llc = 1,
183 GEN_DEFAULT_PIPEOFFSETS,
184 };
185
186 static const struct intel_device_info intel_sandybridge_m_info = {
187 .gen = 6, .is_mobile = 1, .num_pipes = 2,
188 .need_gfx_hws = 1, .has_hotplug = 1,
189 .has_fbc = 1,
190 .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
191 .has_llc = 1,
192 GEN_DEFAULT_PIPEOFFSETS,
193 };
194
195 #define GEN7_FEATURES \
196 .gen = 7, .num_pipes = 3, \
197 .need_gfx_hws = 1, .has_hotplug = 1, \
198 .has_fbc = 1, \
199 .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
200 .has_llc = 1
201
202 static const struct intel_device_info intel_ivybridge_d_info = {
203 GEN7_FEATURES,
204 .is_ivybridge = 1,
205 GEN_DEFAULT_PIPEOFFSETS,
206 };
207
208 static const struct intel_device_info intel_ivybridge_m_info = {
209 GEN7_FEATURES,
210 .is_ivybridge = 1,
211 .is_mobile = 1,
212 GEN_DEFAULT_PIPEOFFSETS,
213 };
214
215 static const struct intel_device_info intel_ivybridge_q_info = {
216 GEN7_FEATURES,
217 .is_ivybridge = 1,
218 .num_pipes = 0, /* legal, last one wins */
219 GEN_DEFAULT_PIPEOFFSETS,
220 };
221
222 static const struct intel_device_info intel_valleyview_m_info = {
223 GEN7_FEATURES,
224 .is_mobile = 1,
225 .num_pipes = 2,
226 .is_valleyview = 1,
227 .display_mmio_offset = VLV_DISPLAY_BASE,
228 .has_fbc = 0, /* legal, last one wins */
229 .has_llc = 0, /* legal, last one wins */
230 GEN_DEFAULT_PIPEOFFSETS,
231 };
232
233 static const struct intel_device_info intel_valleyview_d_info = {
234 GEN7_FEATURES,
235 .num_pipes = 2,
236 .is_valleyview = 1,
237 .display_mmio_offset = VLV_DISPLAY_BASE,
238 .has_fbc = 0, /* legal, last one wins */
239 .has_llc = 0, /* legal, last one wins */
240 GEN_DEFAULT_PIPEOFFSETS,
241 };
242
243 static const struct intel_device_info intel_haswell_d_info = {
244 GEN7_FEATURES,
245 .is_haswell = 1,
246 .has_ddi = 1,
247 .has_fpga_dbg = 1,
248 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
249 GEN_DEFAULT_PIPEOFFSETS,
250 };
251
252 static const struct intel_device_info intel_haswell_m_info = {
253 GEN7_FEATURES,
254 .is_haswell = 1,
255 .is_mobile = 1,
256 .has_ddi = 1,
257 .has_fpga_dbg = 1,
258 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
259 GEN_DEFAULT_PIPEOFFSETS,
260 };
261
262 static const struct intel_device_info intel_broadwell_d_info = {
263 .gen = 8, .num_pipes = 3,
264 .need_gfx_hws = 1, .has_hotplug = 1,
265 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
266 .has_llc = 1,
267 .has_ddi = 1,
268 .has_fbc = 1,
269 GEN_DEFAULT_PIPEOFFSETS,
270 };
271
272 static const struct intel_device_info intel_broadwell_m_info = {
273 .gen = 8, .is_mobile = 1, .num_pipes = 3,
274 .need_gfx_hws = 1, .has_hotplug = 1,
275 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
276 .has_llc = 1,
277 .has_ddi = 1,
278 .has_fbc = 1,
279 GEN_DEFAULT_PIPEOFFSETS,
280 };
281
282 /*
 283  * Make sure any device matches here are ordered from most specific to most
284 * general. For example, since the Quanta match is based on the subsystem
285 * and subvendor IDs, we need it to come before the more general IVB
286 * PCI ID matches, otherwise we'll use the wrong info struct above.
287 */
288 #define INTEL_PCI_IDS \
289 INTEL_I830_IDS(&intel_i830_info), \
290 INTEL_I845G_IDS(&intel_845g_info), \
291 INTEL_I85X_IDS(&intel_i85x_info), \
292 INTEL_I865G_IDS(&intel_i865g_info), \
293 INTEL_I915G_IDS(&intel_i915g_info), \
294 INTEL_I915GM_IDS(&intel_i915gm_info), \
295 INTEL_I945G_IDS(&intel_i945g_info), \
296 INTEL_I945GM_IDS(&intel_i945gm_info), \
297 INTEL_I965G_IDS(&intel_i965g_info), \
298 INTEL_G33_IDS(&intel_g33_info), \
299 INTEL_I965GM_IDS(&intel_i965gm_info), \
300 INTEL_GM45_IDS(&intel_gm45_info), \
301 INTEL_G45_IDS(&intel_g45_info), \
302 INTEL_PINEVIEW_IDS(&intel_pineview_info), \
303 INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info), \
304 INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info), \
305 INTEL_SNB_D_IDS(&intel_sandybridge_d_info), \
306 INTEL_SNB_M_IDS(&intel_sandybridge_m_info), \
307 INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \
308 INTEL_IVB_M_IDS(&intel_ivybridge_m_info), \
309 INTEL_IVB_D_IDS(&intel_ivybridge_d_info), \
310 INTEL_HSW_D_IDS(&intel_haswell_d_info), \
311 INTEL_HSW_M_IDS(&intel_haswell_m_info), \
312 INTEL_VLV_M_IDS(&intel_valleyview_m_info), \
313 INTEL_VLV_D_IDS(&intel_valleyview_d_info), \
314 INTEL_BDW_M_IDS(&intel_broadwell_m_info), \
315 INTEL_BDW_D_IDS(&intel_broadwell_d_info)
316
317 static const struct pci_device_id pciidlist[] = { /* aka */
318 INTEL_PCI_IDS,
319 {0, 0, 0}
320 };
321
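/* Export the PCI ID table for module autoloading only when KMS support is built in. */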
322 #if defined(CONFIG_DRM_I915_KMS)
323 MODULE_DEVICE_TABLE(pci, pciidlist);
324 #endif
325
326 void intel_detect_pch(struct drm_device *dev)
327 {
328 struct drm_i915_private *dev_priv = dev->dev_private;
329 struct pci_dev *pch = NULL;
330
 331 /* In all current cases, num_pipes == 0 is equivalent to the PCH_NOP setting
 332 * (which really amounts to having a PCH but no South Display).
333 */
334 if (INTEL_INFO(dev)->num_pipes == 0) {
335 dev_priv->pch_type = PCH_NOP;
336 return;
337 }
338
 339 /*
 340 * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
 341 * make graphics device passthrough easy for the VMM, which then only
 342 * needs to expose the ISA bridge to let the driver know the real
 343 * hardware underneath. This is a requirement from the virtualization team.
 344 *
 345 * In some virtualized environments (e.g. XEN), there may be an irrelevant
 346 * ISA bridge in the system. To work reliably, we should scan through
 347 * all the ISA bridge devices and check for the first match, instead
 348 * of only checking the first one.
 349 */
350 while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
351 if (pch->vendor == PCI_VENDOR_ID_INTEL) {
352 unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
353 dev_priv->pch_id = id;
354
355 if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
356 dev_priv->pch_type = PCH_IBX;
357 DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
358 WARN_ON(!IS_GEN5(dev));
359 } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
360 dev_priv->pch_type = PCH_CPT;
361 DRM_DEBUG_KMS("Found CougarPoint PCH\n");
362 WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
363 } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
364 /* PantherPoint is CPT compatible */
365 dev_priv->pch_type = PCH_CPT;
366 DRM_DEBUG_KMS("Found PantherPoint PCH\n");
367 WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
368 } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
369 dev_priv->pch_type = PCH_LPT;
370 DRM_DEBUG_KMS("Found LynxPoint PCH\n");
371 WARN_ON(!IS_HASWELL(dev));
372 WARN_ON(IS_ULT(dev));
373 } else if (IS_BROADWELL(dev)) {
374 dev_priv->pch_type = PCH_LPT;
375 dev_priv->pch_id =
376 INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
377 DRM_DEBUG_KMS("This is Broadwell, assuming "
378 "LynxPoint LP PCH\n");
379 } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
380 dev_priv->pch_type = PCH_LPT;
381 DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
382 WARN_ON(!IS_HASWELL(dev));
383 WARN_ON(!IS_ULT(dev));
384 } else
385 continue;
386
387 break;
388 }
389 }
390 if (!pch)
391 DRM_DEBUG_KMS("No PCH found.\n");
392
393 pci_dev_put(pch);
394 }
395
396 bool i915_semaphore_is_enabled(struct drm_device *dev)
397 {
398 if (INTEL_INFO(dev)->gen < 6)
399 return false;
400
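/* An explicit modparam value (>= 0) overrides the per-chip decision below. */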
401 if (i915.semaphores >= 0)
402 return i915.semaphores;
403
404 /* Until we get further testing... */
405 if (IS_GEN8(dev))
406 return false;
407
408 #ifdef CONFIG_INTEL_IOMMU
409 /* Enable semaphores on SNB when IO remapping is off */
410 if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
411 return false;
412 #endif
413
414 return true;
415 }
416
417 static int i915_drm_freeze(struct drm_device *dev)
418 {
419 struct drm_i915_private *dev_priv = dev->dev_private;
420 struct drm_crtc *crtc;
421
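/* Hold a runtime PM reference so the device stays powered while we save state. */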
422 intel_runtime_pm_get(dev_priv);
423
424 /* ignore lid events during suspend */
425 mutex_lock(&dev_priv->modeset_restore_lock);
426 dev_priv->modeset_restore = MODESET_SUSPENDED;
427 mutex_unlock(&dev_priv->modeset_restore_lock);
428
 429 /* We do a lot of poking in a lot of registers; make sure they work
430 * properly. */
431 intel_display_set_init_power(dev_priv, true);
432
433 drm_kms_helper_poll_disable(dev);
434
435 pci_save_state(dev->pdev);
436
437 /* If KMS is active, we do the leavevt stuff here */
438 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
439 int error;
440
441 error = i915_gem_suspend(dev);
442 if (error) {
443 dev_err(&dev->pdev->dev,
444 "GEM idle failed, resume might fail\n");
445 return error;
446 }
447
448 cancel_delayed_work_sync(&dev_priv->rps.delayed_resume_work);
449
450 drm_irq_uninstall(dev);
451 dev_priv->enable_hotplug_processing = false;
452 /*
453 * Disable CRTCs directly since we want to preserve sw state
454 * for _thaw.
455 */
456 mutex_lock(&dev->mode_config.mutex);
457 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
458 dev_priv->display.crtc_disable(crtc);
459 mutex_unlock(&dev->mode_config.mutex);
460
461 intel_modeset_suspend_hw(dev);
462 }
463
464 i915_gem_suspend_gtt_mappings(dev);
465
466 i915_save_state(dev);
467
468 intel_opregion_fini(dev);
469 intel_uncore_fini(dev);
470
471 console_lock();
472 intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
473 console_unlock();
474
475 dev_priv->suspend_count++;
476
477 return 0;
478 }
479
480 int i915_suspend(struct drm_device *dev, pm_message_t state)
481 {
482 int error;
483
484 if (!dev || !dev->dev_private) {
485 DRM_ERROR("dev: %p\n", dev);
486 DRM_ERROR("DRM not initialized, aborting suspend.\n");
487 return -ENODEV;
488 }
489
490 if (state.event == PM_EVENT_PRETHAW)
491 return 0;
492
493
494 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
495 return 0;
496
497 error = i915_drm_freeze(dev);
498 if (error)
499 return error;
500
501 if (state.event == PM_EVENT_SUSPEND) {
502 /* Shut down the device */
503 pci_disable_device(dev->pdev);
504 pci_set_power_state(dev->pdev, PCI_D3hot);
505 }
506
507 return 0;
508 }
509
510 void intel_console_resume(struct work_struct *work)
511 {
512 struct drm_i915_private *dev_priv =
513 container_of(work, struct drm_i915_private,
514 console_resume_work);
515 struct drm_device *dev = dev_priv->dev;
516
517 console_lock();
518 intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
519 console_unlock();
520 }
521
522 static void intel_resume_hotplug(struct drm_device *dev)
523 {
524 struct drm_mode_config *mode_config = &dev->mode_config;
525 struct intel_encoder *encoder;
526
527 mutex_lock(&mode_config->mutex);
528 DRM_DEBUG_KMS("running encoder hotplug functions\n");
529
530 list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
531 if (encoder->hot_plug)
532 encoder->hot_plug(encoder);
533
534 mutex_unlock(&mode_config->mutex);
535
536 /* Just fire off a uevent and let userspace tell us what to do */
537 drm_helper_hpd_irq_event(dev);
538 }
539
540 static int i915_drm_thaw_early(struct drm_device *dev)
541 {
542 struct drm_i915_private *dev_priv = dev->dev_private;
543
544 intel_uncore_early_sanitize(dev);
545 intel_uncore_sanitize(dev);
546 intel_power_domains_init_hw(dev_priv);
547
548 return 0;
549 }
550
551 static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
552 {
553 struct drm_i915_private *dev_priv = dev->dev_private;
554
555 if (drm_core_check_feature(dev, DRIVER_MODESET) &&
556 restore_gtt_mappings) {
557 mutex_lock(&dev->struct_mutex);
558 i915_gem_restore_gtt_mappings(dev);
559 mutex_unlock(&dev->struct_mutex);
560 }
561
562 i915_restore_state(dev);
563 intel_opregion_setup(dev);
564
565 /* KMS EnterVT equivalent */
566 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
567 intel_init_pch_refclk(dev);
568 drm_mode_config_reset(dev);
569
570 mutex_lock(&dev->struct_mutex);
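/*
* If the GPU fails to come back up, mark it wedged so that subsequent
* GEM calls fail cleanly with -EIO instead of hanging on dead hardware.
*/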
571 if (i915_gem_init_hw(dev)) {
572 DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
573 atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
574 }
575 mutex_unlock(&dev->struct_mutex);
576
577 /* We need working interrupts for modeset enabling ... */
578 drm_irq_install(dev, dev->pdev->irq);
579
580 intel_modeset_init_hw(dev);
581
582 drm_modeset_lock_all(dev);
583 intel_modeset_setup_hw_state(dev, true);
584 drm_modeset_unlock_all(dev);
585
 586 /*
 587 * ... but also need to make sure that hotplug processing
 588 * doesn't cause havoc. Like in the driver load code we don't
 589 * bother with the tiny race here where we might lose hotplug
 590 * notifications.
 591 */
592 intel_hpd_init(dev);
593 dev_priv->enable_hotplug_processing = true;
594 /* Config may have changed between suspend and resume */
595 intel_resume_hotplug(dev);
596 }
597
598 intel_opregion_init(dev);
599
600 /*
 601 * The console lock can be pretty contended on resume due
602 * to all the printk activity. Try to keep it out of the hot
603 * path of resume if possible.
604 */
605 if (console_trylock()) {
606 intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
607 console_unlock();
608 } else {
609 schedule_work(&dev_priv->console_resume_work);
610 }
611
612 mutex_lock(&dev_priv->modeset_restore_lock);
613 dev_priv->modeset_restore = MODESET_DONE;
614 mutex_unlock(&dev_priv->modeset_restore_lock);
615
616 intel_runtime_pm_put(dev_priv);
617 return 0;
618 }
619
620 static int i915_drm_thaw(struct drm_device *dev)
621 {
622 if (drm_core_check_feature(dev, DRIVER_MODESET))
623 i915_check_and_clear_faults(dev);
624
625 return __i915_drm_thaw(dev, true);
626 }
627
628 static int i915_resume_early(struct drm_device *dev)
629 {
630 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
631 return 0;
632
633 /*
634 * We have a resume ordering issue with the snd-hda driver also
 635 * requiring our device to be powered up. Due to the lack of a
636 * parent/child relationship we currently solve this with an early
637 * resume hook.
638 *
639 * FIXME: This should be solved with a special hdmi sink device or
640 * similar so that power domains can be employed.
641 */
642 if (pci_enable_device(dev->pdev))
643 return -EIO;
644
645 pci_set_master(dev->pdev);
646
647 return i915_drm_thaw_early(dev);
648 }
649
650 int i915_resume(struct drm_device *dev)
651 {
652 struct drm_i915_private *dev_priv = dev->dev_private;
653 int ret;
654
655 /*
 656 * Platforms with an opregion should have a sane BIOS; older ones (gen3 and
657 * earlier) need to restore the GTT mappings since the BIOS might clear
658 * all our scratch PTEs.
659 */
660 ret = __i915_drm_thaw(dev, !dev_priv->opregion.header);
661 if (ret)
662 return ret;
663
664 drm_kms_helper_poll_enable(dev);
665 return 0;
666 }
667
668 static int i915_resume_legacy(struct drm_device *dev)
669 {
 670 int ret = i915_resume_early(dev);
 671 if (ret)
 672 return ret;
 673 return i915_resume(dev);
674 }
675
676 /**
677 * i915_reset - reset chip after a hang
678 * @dev: drm device to reset
679 *
680 * Reset the chip. Useful if a hang is detected. Returns zero on successful
681 * reset or otherwise an error code.
682 *
683 * Procedure is fairly simple:
684 * - reset the chip using the reset reg
685 * - re-init context state
686 * - re-init hardware status page
687 * - re-init ring buffer
688 * - re-init interrupt state
689 * - re-init display
690 */
691 int i915_reset(struct drm_device *dev)
692 {
693 struct drm_i915_private *dev_priv = dev->dev_private;
694 bool simulated;
695 int ret;
696
697 if (!i915.reset)
698 return 0;
699
700 mutex_lock(&dev->struct_mutex);
701
702 i915_gem_reset(dev);
703
704 simulated = dev_priv->gpu_error.stop_rings != 0;
705
706 ret = intel_gpu_reset(dev);
707
708 /* Also reset the gpu hangman. */
709 if (simulated) {
710 DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
711 dev_priv->gpu_error.stop_rings = 0;
712 if (ret == -ENODEV) {
713 DRM_INFO("Reset not implemented, but ignoring "
714 "error for simulated gpu hangs\n");
715 ret = 0;
716 }
717 }
718
719 if (ret) {
720 DRM_ERROR("Failed to reset chip: %i\n", ret);
721 mutex_unlock(&dev->struct_mutex);
722 return ret;
723 }
724
725 /* Ok, now get things going again... */
726
727 /*
728 * Everything depends on having the GTT running, so we need to start
729 * there. Fortunately we don't need to do this unless we reset the
730 * chip at a PCI level.
731 *
732 * Next we need to restore the context, but we don't use those
733 * yet either...
734 *
735 * Ring buffer needs to be re-initialized in the KMS case, or if X
736 * was running at the time of the reset (i.e. we weren't VT
737 * switched away).
738 */
739 if (drm_core_check_feature(dev, DRIVER_MODESET) ||
740 !dev_priv->ums.mm_suspended) {
741 dev_priv->ums.mm_suspended = 0;
742
743 ret = i915_gem_init_hw(dev);
744 mutex_unlock(&dev->struct_mutex);
745 if (ret) {
746 DRM_ERROR("Failed hw init on reset %d\n", ret);
747 return ret;
748 }
749
750 /*
 751 * FIXME: This is horribly racy against concurrent pageflip and
752 * vblank wait ioctls since they can observe dev->irqs_disabled
753 * being false when they shouldn't be able to.
754 */
755 drm_irq_uninstall(dev);
756 drm_irq_install(dev, dev->pdev->irq);
757
758 /* rps/rc6 re-init is necessary to restore state lost after the
759 * reset and the re-install of drm irq. Skip for ironlake per
760 * previous concerns that it doesn't respond well to some forms
761 * of re-init after reset. */
762 if (INTEL_INFO(dev)->gen > 5) {
763 mutex_lock(&dev->struct_mutex);
764 intel_enable_gt_powersave(dev);
765 mutex_unlock(&dev->struct_mutex);
766 }
767
768 intel_hpd_init(dev);
769 } else {
770 mutex_unlock(&dev->struct_mutex);
771 }
772
773 return 0;
774 }
775
776 static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
777 {
778 struct intel_device_info *intel_info =
779 (struct intel_device_info *) ent->driver_data;
780
781 if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
782 DRM_INFO("This hardware requires preliminary hardware support.\n"
783 "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
784 return -ENODEV;
785 }
786
787 /* Only bind to function 0 of the device. Early generations
788 * used function 1 as a placeholder for multi-head. This causes
 789 * us confusion, especially on systems where both
790 * functions have the same PCI-ID!
791 */
792 if (PCI_FUNC(pdev->devfn))
793 return -ENODEV;
794
795 driver.driver_features &= ~(DRIVER_USE_AGP);
796
797 return drm_get_pci_dev(pdev, ent, &driver);
798 }
799
800 static void
801 i915_pci_remove(struct pci_dev *pdev)
802 {
803 struct drm_device *dev = pci_get_drvdata(pdev);
804
805 drm_put_dev(dev);
806 }
807
808 static int i915_pm_suspend(struct device *dev)
809 {
810 struct pci_dev *pdev = to_pci_dev(dev);
811 struct drm_device *drm_dev = pci_get_drvdata(pdev);
812
813 if (!drm_dev || !drm_dev->dev_private) {
814 dev_err(dev, "DRM not initialized, aborting suspend.\n");
815 return -ENODEV;
816 }
817
818 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
819 return 0;
820
821 return i915_drm_freeze(drm_dev);
822 }
823
824 static int i915_pm_suspend_late(struct device *dev)
825 {
826 struct pci_dev *pdev = to_pci_dev(dev);
827 struct drm_device *drm_dev = pci_get_drvdata(pdev);
828
829 /*
 830 * We have a suspend ordering issue with the snd-hda driver also
 831 * requiring our device to be powered up. Due to the lack of a
 832 * parent/child relationship we currently solve this with a late
833 * suspend hook.
834 *
835 * FIXME: This should be solved with a special hdmi sink device or
836 * similar so that power domains can be employed.
837 */
838 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
839 return 0;
840
841 pci_disable_device(pdev);
842 pci_set_power_state(pdev, PCI_D3hot);
843
844 return 0;
845 }
846
847 static int i915_pm_resume_early(struct device *dev)
848 {
849 struct pci_dev *pdev = to_pci_dev(dev);
850 struct drm_device *drm_dev = pci_get_drvdata(pdev);
851
852 return i915_resume_early(drm_dev);
853 }
854
855 static int i915_pm_resume(struct device *dev)
856 {
857 struct pci_dev *pdev = to_pci_dev(dev);
858 struct drm_device *drm_dev = pci_get_drvdata(pdev);
859
860 return i915_resume(drm_dev);
861 }
862
863 static int i915_pm_freeze(struct device *dev)
864 {
865 struct pci_dev *pdev = to_pci_dev(dev);
866 struct drm_device *drm_dev = pci_get_drvdata(pdev);
867
868 if (!drm_dev || !drm_dev->dev_private) {
869 dev_err(dev, "DRM not initialized, aborting suspend.\n");
870 return -ENODEV;
871 }
872
873 return i915_drm_freeze(drm_dev);
874 }
875
876 static int i915_pm_thaw_early(struct device *dev)
877 {
878 struct pci_dev *pdev = to_pci_dev(dev);
879 struct drm_device *drm_dev = pci_get_drvdata(pdev);
880
881 return i915_drm_thaw_early(drm_dev);
882 }
883
884 static int i915_pm_thaw(struct device *dev)
885 {
886 struct pci_dev *pdev = to_pci_dev(dev);
887 struct drm_device *drm_dev = pci_get_drvdata(pdev);
888
889 return i915_drm_thaw(drm_dev);
890 }
891
892 static int i915_pm_poweroff(struct device *dev)
893 {
894 struct pci_dev *pdev = to_pci_dev(dev);
895 struct drm_device *drm_dev = pci_get_drvdata(pdev);
896
897 return i915_drm_freeze(drm_dev);
898 }
899
900 static void snb_runtime_suspend(struct drm_i915_private *dev_priv)
901 {
902 struct drm_device *dev = dev_priv->dev;
903
904 intel_runtime_pm_disable_interrupts(dev);
905 }
906
907 static void hsw_runtime_suspend(struct drm_i915_private *dev_priv)
908 {
909 hsw_enable_pc8(dev_priv);
910 }
911
912 static void snb_runtime_resume(struct drm_i915_private *dev_priv)
913 {
914 struct drm_device *dev = dev_priv->dev;
915
916 intel_runtime_pm_restore_interrupts(dev);
917 intel_init_pch_refclk(dev);
918 i915_gem_init_swizzling(dev);
919 mutex_lock(&dev_priv->rps.hw_lock);
920 gen6_update_ring_freq(dev);
921 mutex_unlock(&dev_priv->rps.hw_lock);
922 }
923
924 static void hsw_runtime_resume(struct drm_i915_private *dev_priv)
925 {
926 hsw_disable_pc8(dev_priv);
927 }
928
929 static int intel_runtime_suspend(struct device *device)
930 {
931 struct pci_dev *pdev = to_pci_dev(device);
932 struct drm_device *dev = pci_get_drvdata(pdev);
933 struct drm_i915_private *dev_priv = dev->dev_private;
934
935 WARN_ON(!HAS_RUNTIME_PM(dev));
936 assert_force_wake_inactive(dev_priv);
937
938 DRM_DEBUG_KMS("Suspending device\n");
939
940 if (IS_GEN6(dev))
941 snb_runtime_suspend(dev_priv);
942 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
943 hsw_runtime_suspend(dev_priv);
944 else
945 WARN_ON(1);
946
947 i915_gem_release_all_mmaps(dev_priv);
948
949 del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
950 dev_priv->pm.suspended = true;
951
952 /*
953 * current versions of firmware which depend on this opregion
954 * notification have repurposed the D1 definition to mean
955 * "runtime suspended" vs. what you would normally expect (D3)
956 * to distinguish it from notifications that might be sent
957 * via the suspend path.
958 */
959 intel_opregion_notify_adapter(dev, PCI_D1);
960
961 DRM_DEBUG_KMS("Device suspended\n");
962 return 0;
963 }
964
965 static int intel_runtime_resume(struct device *device)
966 {
967 struct pci_dev *pdev = to_pci_dev(device);
968 struct drm_device *dev = pci_get_drvdata(pdev);
969 struct drm_i915_private *dev_priv = dev->dev_private;
970
971 WARN_ON(!HAS_RUNTIME_PM(dev));
972
973 DRM_DEBUG_KMS("Resuming device\n");
974
975 intel_opregion_notify_adapter(dev, PCI_D0);
976 dev_priv->pm.suspended = false;
977
978 if (IS_GEN6(dev))
979 snb_runtime_resume(dev_priv);
980 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
981 hsw_runtime_resume(dev_priv);
982 else
983 WARN_ON(1);
984
985 DRM_DEBUG_KMS("Device resumed\n");
986 return 0;
987 }
988
989 static const struct dev_pm_ops i915_pm_ops = {
990 .suspend = i915_pm_suspend,
991 .suspend_late = i915_pm_suspend_late,
992 .resume_early = i915_pm_resume_early,
993 .resume = i915_pm_resume,
994 .freeze = i915_pm_freeze,
995 .thaw_early = i915_pm_thaw_early,
996 .thaw = i915_pm_thaw,
997 .poweroff = i915_pm_poweroff,
998 .restore_early = i915_pm_resume_early,
999 .restore = i915_pm_resume,
1000 .runtime_suspend = intel_runtime_suspend,
1001 .runtime_resume = intel_runtime_resume,
1002 };
1003
1004 static const struct vm_operations_struct i915_gem_vm_ops = {
1005 .fault = i915_gem_fault,
1006 .open = drm_gem_vm_open,
1007 .close = drm_gem_vm_close,
1008 };
1009
1010 static const struct file_operations i915_driver_fops = {
1011 .owner = THIS_MODULE,
1012 .open = drm_open,
1013 .release = drm_release,
1014 .unlocked_ioctl = drm_ioctl,
1015 .mmap = drm_gem_mmap,
1016 .poll = drm_poll,
1017 .read = drm_read,
1018 #ifdef CONFIG_COMPAT
1019 .compat_ioctl = i915_compat_ioctl,
1020 #endif
1021 .llseek = noop_llseek,
1022 };
1023
1024 static struct drm_driver driver = {
1025 /* Don't use MTRRs here; the Xserver or userspace app should
1026 * deal with them for Intel hardware.
1027 */
1028 .driver_features =
1029 DRIVER_USE_AGP |
1030 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
1031 DRIVER_RENDER,
1032 .load = i915_driver_load,
1033 .unload = i915_driver_unload,
1034 .open = i915_driver_open,
1035 .lastclose = i915_driver_lastclose,
1036 .preclose = i915_driver_preclose,
1037 .postclose = i915_driver_postclose,
1038
1039 /* Used in place of i915_pm_ops for non-DRIVER_MODESET */
1040 .suspend = i915_suspend,
1041 .resume = i915_resume_legacy,
1042
1043 .device_is_agp = i915_driver_device_is_agp,
1044 .master_create = i915_master_create,
1045 .master_destroy = i915_master_destroy,
1046 #if defined(CONFIG_DEBUG_FS)
1047 .debugfs_init = i915_debugfs_init,
1048 .debugfs_cleanup = i915_debugfs_cleanup,
1049 #endif
1050 .gem_free_object = i915_gem_free_object,
1051 .gem_vm_ops = &i915_gem_vm_ops,
1052
1053 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
1054 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
1055 .gem_prime_export = i915_gem_prime_export,
1056 .gem_prime_import = i915_gem_prime_import,
1057
1058 .dumb_create = i915_gem_dumb_create,
1059 .dumb_map_offset = i915_gem_mmap_gtt,
1060 .dumb_destroy = drm_gem_dumb_destroy,
1061 .ioctls = i915_ioctls,
1062 .fops = &i915_driver_fops,
1063 .name = DRIVER_NAME,
1064 .desc = DRIVER_DESC,
1065 .date = DRIVER_DATE,
1066 .major = DRIVER_MAJOR,
1067 .minor = DRIVER_MINOR,
1068 .patchlevel = DRIVER_PATCHLEVEL,
1069 };
1070
1071 static struct pci_driver i915_pci_driver = {
1072 .name = DRIVER_NAME,
1073 .id_table = pciidlist,
1074 .probe = i915_pci_probe,
1075 .remove = i915_pci_remove,
1076 .driver.pm = &i915_pm_ops,
1077 };
1078
1079 static int __init i915_init(void)
1080 {
1081 driver.num_ioctls = i915_max_ioctl;
1082
1083 /*
1084 * If CONFIG_DRM_I915_KMS is set, default to KMS unless
 1085 * explicitly disabled with the module parameter.
1086 *
1087 * Otherwise, just follow the parameter (defaulting to off).
1088 *
1089 * Allow optional vga_text_mode_force boot option to override
1090 * the default behavior.
1091 */
1092 #if defined(CONFIG_DRM_I915_KMS)
1093 if (i915.modeset != 0)
1094 driver.driver_features |= DRIVER_MODESET;
1095 #endif
1096 if (i915.modeset == 1)
1097 driver.driver_features |= DRIVER_MODESET;
1098
1099 #ifdef CONFIG_VGA_CONSOLE
1100 if (vgacon_text_force() && i915.modeset == -1)
1101 driver.driver_features &= ~DRIVER_MODESET;
1102 #endif
1103
1104 if (!(driver.driver_features & DRIVER_MODESET)) {
1105 driver.get_vblank_timestamp = NULL;
1106 #ifndef CONFIG_DRM_I915_UMS
1107 /* Silently fail loading to not upset userspace. */
1108 return 0;
1109 #endif
1110 }
1111
1112 return drm_pci_init(&driver, &i915_pci_driver);
1113 }
1114
1115 static void __exit i915_exit(void)
1116 {
1117 #ifndef CONFIG_DRM_I915_UMS
1118 if (!(driver.driver_features & DRIVER_MODESET))
1119 return; /* Never loaded a driver. */
1120 #endif
1121
1122 drm_pci_exit(&driver, &i915_pci_driver);
1123 }
1124
1125 module_init(i915_init);
1126 module_exit(i915_exit);
1127
1128 MODULE_AUTHOR(DRIVER_AUTHOR);
1129 MODULE_DESCRIPTION(DRIVER_DESC);
1130 MODULE_LICENSE("GPL and additional rights");