drm/i915: Improve irq handling after gpu resets
[deliverable/linux.git] drivers/gpu/drm/i915/i915_drv.c
1 /* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*-
2 */
3 /*
4 *
5 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
6 * All Rights Reserved.
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
15 *
16 * The above copyright notice and this permission notice (including the
17 * next paragraph) shall be included in all copies or substantial portions
18 * of the Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
21 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
22 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
23 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
24 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
25 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
26 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27 *
28 */
29
30 #include <linux/device.h>
31 #include <drm/drmP.h>
32 #include <drm/i915_drm.h>
33 #include "i915_drv.h"
34 #include "i915_trace.h"
35 #include "intel_drv.h"
36
37 #include <linux/console.h>
38 #include <linux/module.h>
39 #include <linux/pm_runtime.h>
40 #include <drm/drm_crtc_helper.h>
41
42 static struct drm_driver driver;
43
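/*
 * Per-platform register offset tables: the macros below fill in the pipe,
 * transcoder, DPLL, palette and cursor offsets used by the device info
 * structures that follow (CHV has its own layout for the third pipe).
 */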
44 #define GEN_DEFAULT_PIPEOFFSETS \
45 .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
46 PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \
47 .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
48 TRANSCODER_C_OFFSET, TRANSCODER_EDP_OFFSET }, \
49 .dpll_offsets = { DPLL_A_OFFSET, DPLL_B_OFFSET }, \
50 .dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET }, \
51 .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET }
52
53 #define GEN_CHV_PIPEOFFSETS \
54 .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \
55 CHV_PIPE_C_OFFSET }, \
56 .trans_offsets = { TRANSCODER_A_OFFSET, TRANSCODER_B_OFFSET, \
57 CHV_TRANSCODER_C_OFFSET, }, \
58 .dpll_offsets = { DPLL_A_OFFSET, DPLL_B_OFFSET, \
59 CHV_DPLL_C_OFFSET }, \
60 .dpll_md_offsets = { DPLL_A_MD_OFFSET, DPLL_B_MD_OFFSET, \
61 CHV_DPLL_C_MD_OFFSET }, \
62 .palette_offsets = { PALETTE_A_OFFSET, PALETTE_B_OFFSET, \
63 CHV_PALETTE_C_OFFSET }
64
65 #define CURSOR_OFFSETS \
66 .cursor_offsets = { CURSOR_A_OFFSET, CURSOR_B_OFFSET, CHV_CURSOR_C_OFFSET }
67
68 #define IVB_CURSOR_OFFSETS \
69 .cursor_offsets = { CURSOR_A_OFFSET, IVB_CURSOR_B_OFFSET, IVB_CURSOR_C_OFFSET }
70
71 static const struct intel_device_info intel_i830_info = {
72 .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2,
73 .has_overlay = 1, .overlay_needs_physical = 1,
74 .ring_mask = RENDER_RING,
75 GEN_DEFAULT_PIPEOFFSETS,
76 CURSOR_OFFSETS,
77 };
78
79 static const struct intel_device_info intel_845g_info = {
80 .gen = 2, .num_pipes = 1,
81 .has_overlay = 1, .overlay_needs_physical = 1,
82 .ring_mask = RENDER_RING,
83 GEN_DEFAULT_PIPEOFFSETS,
84 CURSOR_OFFSETS,
85 };
86
87 static const struct intel_device_info intel_i85x_info = {
88 .gen = 2, .is_i85x = 1, .is_mobile = 1, .num_pipes = 2,
89 .cursor_needs_physical = 1,
90 .has_overlay = 1, .overlay_needs_physical = 1,
91 .has_fbc = 1,
92 .ring_mask = RENDER_RING,
93 GEN_DEFAULT_PIPEOFFSETS,
94 CURSOR_OFFSETS,
95 };
96
97 static const struct intel_device_info intel_i865g_info = {
98 .gen = 2, .num_pipes = 1,
99 .has_overlay = 1, .overlay_needs_physical = 1,
100 .ring_mask = RENDER_RING,
101 GEN_DEFAULT_PIPEOFFSETS,
102 CURSOR_OFFSETS,
103 };
104
105 static const struct intel_device_info intel_i915g_info = {
106 .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .num_pipes = 2,
107 .has_overlay = 1, .overlay_needs_physical = 1,
108 .ring_mask = RENDER_RING,
109 GEN_DEFAULT_PIPEOFFSETS,
110 CURSOR_OFFSETS,
111 };
112 static const struct intel_device_info intel_i915gm_info = {
113 .gen = 3, .is_mobile = 1, .num_pipes = 2,
114 .cursor_needs_physical = 1,
115 .has_overlay = 1, .overlay_needs_physical = 1,
116 .supports_tv = 1,
117 .has_fbc = 1,
118 .ring_mask = RENDER_RING,
119 GEN_DEFAULT_PIPEOFFSETS,
120 CURSOR_OFFSETS,
121 };
122 static const struct intel_device_info intel_i945g_info = {
123 .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .num_pipes = 2,
124 .has_overlay = 1, .overlay_needs_physical = 1,
125 .ring_mask = RENDER_RING,
126 GEN_DEFAULT_PIPEOFFSETS,
127 CURSOR_OFFSETS,
128 };
129 static const struct intel_device_info intel_i945gm_info = {
130 .gen = 3, .is_i945gm = 1, .is_mobile = 1, .num_pipes = 2,
131 .has_hotplug = 1, .cursor_needs_physical = 1,
132 .has_overlay = 1, .overlay_needs_physical = 1,
133 .supports_tv = 1,
134 .has_fbc = 1,
135 .ring_mask = RENDER_RING,
136 GEN_DEFAULT_PIPEOFFSETS,
137 CURSOR_OFFSETS,
138 };
139
140 static const struct intel_device_info intel_i965g_info = {
141 .gen = 4, .is_broadwater = 1, .num_pipes = 2,
142 .has_hotplug = 1,
143 .has_overlay = 1,
144 .ring_mask = RENDER_RING,
145 GEN_DEFAULT_PIPEOFFSETS,
146 CURSOR_OFFSETS,
147 };
148
149 static const struct intel_device_info intel_i965gm_info = {
150 .gen = 4, .is_crestline = 1, .num_pipes = 2,
151 .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
152 .has_overlay = 1,
153 .supports_tv = 1,
154 .ring_mask = RENDER_RING,
155 GEN_DEFAULT_PIPEOFFSETS,
156 CURSOR_OFFSETS,
157 };
158
159 static const struct intel_device_info intel_g33_info = {
160 .gen = 3, .is_g33 = 1, .num_pipes = 2,
161 .need_gfx_hws = 1, .has_hotplug = 1,
162 .has_overlay = 1,
163 .ring_mask = RENDER_RING,
164 GEN_DEFAULT_PIPEOFFSETS,
165 CURSOR_OFFSETS,
166 };
167
168 static const struct intel_device_info intel_g45_info = {
169 .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .num_pipes = 2,
170 .has_pipe_cxsr = 1, .has_hotplug = 1,
171 .ring_mask = RENDER_RING | BSD_RING,
172 GEN_DEFAULT_PIPEOFFSETS,
173 CURSOR_OFFSETS,
174 };
175
176 static const struct intel_device_info intel_gm45_info = {
177 .gen = 4, .is_g4x = 1, .num_pipes = 2,
178 .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
179 .has_pipe_cxsr = 1, .has_hotplug = 1,
180 .supports_tv = 1,
181 .ring_mask = RENDER_RING | BSD_RING,
182 GEN_DEFAULT_PIPEOFFSETS,
183 CURSOR_OFFSETS,
184 };
185
186 static const struct intel_device_info intel_pineview_info = {
187 .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .num_pipes = 2,
188 .need_gfx_hws = 1, .has_hotplug = 1,
189 .has_overlay = 1,
190 GEN_DEFAULT_PIPEOFFSETS,
191 CURSOR_OFFSETS,
192 };
193
194 static const struct intel_device_info intel_ironlake_d_info = {
195 .gen = 5, .num_pipes = 2,
196 .need_gfx_hws = 1, .has_hotplug = 1,
197 .ring_mask = RENDER_RING | BSD_RING,
198 GEN_DEFAULT_PIPEOFFSETS,
199 CURSOR_OFFSETS,
200 };
201
202 static const struct intel_device_info intel_ironlake_m_info = {
203 .gen = 5, .is_mobile = 1, .num_pipes = 2,
204 .need_gfx_hws = 1, .has_hotplug = 1,
205 .has_fbc = 1,
206 .ring_mask = RENDER_RING | BSD_RING,
207 GEN_DEFAULT_PIPEOFFSETS,
208 CURSOR_OFFSETS,
209 };
210
211 static const struct intel_device_info intel_sandybridge_d_info = {
212 .gen = 6, .num_pipes = 2,
213 .need_gfx_hws = 1, .has_hotplug = 1,
214 .has_fbc = 1,
215 .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
216 .has_llc = 1,
217 GEN_DEFAULT_PIPEOFFSETS,
218 CURSOR_OFFSETS,
219 };
220
221 static const struct intel_device_info intel_sandybridge_m_info = {
222 .gen = 6, .is_mobile = 1, .num_pipes = 2,
223 .need_gfx_hws = 1, .has_hotplug = 1,
224 .has_fbc = 1,
225 .ring_mask = RENDER_RING | BSD_RING | BLT_RING,
226 .has_llc = 1,
227 GEN_DEFAULT_PIPEOFFSETS,
228 CURSOR_OFFSETS,
229 };
230
231 #define GEN7_FEATURES \
232 .gen = 7, .num_pipes = 3, \
233 .need_gfx_hws = 1, .has_hotplug = 1, \
234 .has_fbc = 1, \
235 .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \
236 .has_llc = 1
237
238 static const struct intel_device_info intel_ivybridge_d_info = {
239 GEN7_FEATURES,
240 .is_ivybridge = 1,
241 GEN_DEFAULT_PIPEOFFSETS,
242 IVB_CURSOR_OFFSETS,
243 };
244
245 static const struct intel_device_info intel_ivybridge_m_info = {
246 GEN7_FEATURES,
247 .is_ivybridge = 1,
248 .is_mobile = 1,
249 GEN_DEFAULT_PIPEOFFSETS,
250 IVB_CURSOR_OFFSETS,
251 };
252
253 static const struct intel_device_info intel_ivybridge_q_info = {
254 GEN7_FEATURES,
255 .is_ivybridge = 1,
256 .num_pipes = 0, /* legal, last one wins */
257 GEN_DEFAULT_PIPEOFFSETS,
258 IVB_CURSOR_OFFSETS,
259 };
260
261 static const struct intel_device_info intel_valleyview_m_info = {
262 GEN7_FEATURES,
263 .is_mobile = 1,
264 .num_pipes = 2,
265 .is_valleyview = 1,
266 .display_mmio_offset = VLV_DISPLAY_BASE,
267 .has_fbc = 0, /* legal, last one wins */
268 .has_llc = 0, /* legal, last one wins */
269 GEN_DEFAULT_PIPEOFFSETS,
270 CURSOR_OFFSETS,
271 };
272
273 static const struct intel_device_info intel_valleyview_d_info = {
274 GEN7_FEATURES,
275 .num_pipes = 2,
276 .is_valleyview = 1,
277 .display_mmio_offset = VLV_DISPLAY_BASE,
278 .has_fbc = 0, /* legal, last one wins */
279 .has_llc = 0, /* legal, last one wins */
280 GEN_DEFAULT_PIPEOFFSETS,
281 CURSOR_OFFSETS,
282 };
283
284 static const struct intel_device_info intel_haswell_d_info = {
285 GEN7_FEATURES,
286 .is_haswell = 1,
287 .has_ddi = 1,
288 .has_fpga_dbg = 1,
289 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
290 GEN_DEFAULT_PIPEOFFSETS,
291 IVB_CURSOR_OFFSETS,
292 };
293
294 static const struct intel_device_info intel_haswell_m_info = {
295 GEN7_FEATURES,
296 .is_haswell = 1,
297 .is_mobile = 1,
298 .has_ddi = 1,
299 .has_fpga_dbg = 1,
300 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
301 GEN_DEFAULT_PIPEOFFSETS,
302 IVB_CURSOR_OFFSETS,
303 };
304
305 static const struct intel_device_info intel_broadwell_d_info = {
306 .gen = 8, .num_pipes = 3,
307 .need_gfx_hws = 1, .has_hotplug = 1,
308 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
309 .has_llc = 1,
310 .has_ddi = 1,
311 .has_fbc = 1,
312 GEN_DEFAULT_PIPEOFFSETS,
313 IVB_CURSOR_OFFSETS,
314 };
315
316 static const struct intel_device_info intel_broadwell_m_info = {
317 .gen = 8, .is_mobile = 1, .num_pipes = 3,
318 .need_gfx_hws = 1, .has_hotplug = 1,
319 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
320 .has_llc = 1,
321 .has_ddi = 1,
322 .has_fbc = 1,
323 GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
324 };
325
326 static const struct intel_device_info intel_broadwell_gt3d_info = {
327 .gen = 8, .num_pipes = 3,
328 .need_gfx_hws = 1, .has_hotplug = 1,
329 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
330 .has_llc = 1,
331 .has_ddi = 1,
332 .has_fbc = 1,
333 GEN_DEFAULT_PIPEOFFSETS,
IVB_CURSOR_OFFSETS,
334 };
335
336 static const struct intel_device_info intel_broadwell_gt3m_info = {
337 .gen = 8, .is_mobile = 1, .num_pipes = 3,
338 .need_gfx_hws = 1, .has_hotplug = 1,
339 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING | BSD2_RING,
340 .has_llc = 1,
341 .has_ddi = 1,
342 .has_fbc = 1,
343 GEN_DEFAULT_PIPEOFFSETS,
344 IVB_CURSOR_OFFSETS,
345 };
346
347 static const struct intel_device_info intel_cherryview_info = {
348 .is_preliminary = 1,
349 .gen = 8, .num_pipes = 3,
350 .need_gfx_hws = 1, .has_hotplug = 1,
351 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING,
352 .is_valleyview = 1,
353 .display_mmio_offset = VLV_DISPLAY_BASE,
354 GEN_CHV_PIPEOFFSETS,
355 CURSOR_OFFSETS,
356 };
357
358 /*
359 * Make sure any device matches here are from most specific to most
360 * general. For example, since the Quanta match is based on the subsystem
361 * and subvendor IDs, we need it to come before the more general IVB
362 * PCI ID matches, otherwise we'll use the wrong info struct above.
363 */
364 #define INTEL_PCI_IDS \
365 INTEL_I830_IDS(&intel_i830_info), \
366 INTEL_I845G_IDS(&intel_845g_info), \
367 INTEL_I85X_IDS(&intel_i85x_info), \
368 INTEL_I865G_IDS(&intel_i865g_info), \
369 INTEL_I915G_IDS(&intel_i915g_info), \
370 INTEL_I915GM_IDS(&intel_i915gm_info), \
371 INTEL_I945G_IDS(&intel_i945g_info), \
372 INTEL_I945GM_IDS(&intel_i945gm_info), \
373 INTEL_I965G_IDS(&intel_i965g_info), \
374 INTEL_G33_IDS(&intel_g33_info), \
375 INTEL_I965GM_IDS(&intel_i965gm_info), \
376 INTEL_GM45_IDS(&intel_gm45_info), \
377 INTEL_G45_IDS(&intel_g45_info), \
378 INTEL_PINEVIEW_IDS(&intel_pineview_info), \
379 INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info), \
380 INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info), \
381 INTEL_SNB_D_IDS(&intel_sandybridge_d_info), \
382 INTEL_SNB_M_IDS(&intel_sandybridge_m_info), \
383 INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \
384 INTEL_IVB_M_IDS(&intel_ivybridge_m_info), \
385 INTEL_IVB_D_IDS(&intel_ivybridge_d_info), \
386 INTEL_HSW_D_IDS(&intel_haswell_d_info), \
387 INTEL_HSW_M_IDS(&intel_haswell_m_info), \
388 INTEL_VLV_M_IDS(&intel_valleyview_m_info), \
389 INTEL_VLV_D_IDS(&intel_valleyview_d_info), \
390 INTEL_BDW_GT12M_IDS(&intel_broadwell_m_info), \
391 INTEL_BDW_GT12D_IDS(&intel_broadwell_d_info), \
392 INTEL_BDW_GT3M_IDS(&intel_broadwell_gt3m_info), \
393 INTEL_BDW_GT3D_IDS(&intel_broadwell_gt3d_info), \
394 INTEL_CHV_IDS(&intel_cherryview_info)
395
396 static const struct pci_device_id pciidlist[] = { /* aka */
397 INTEL_PCI_IDS,
398 {0, 0, 0}
399 };
400
401 #if defined(CONFIG_DRM_I915_KMS)
402 MODULE_DEVICE_TABLE(pci, pciidlist);
403 #endif
404
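/*
 * intel_detect_pch - detect which PCH (south display) variant is present by
 * scanning the Intel ISA bridge devices, and cache the result in
 * dev_priv->pch_type / pch_id for later use. Devices without a south
 * display (num_pipes == 0) are marked as PCH_NOP.
 */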
405 void intel_detect_pch(struct drm_device *dev)
406 {
407 struct drm_i915_private *dev_priv = dev->dev_private;
408 struct pci_dev *pch = NULL;
409
410 /* In all current cases, num_pipes is equivalent to the PCH_NOP setting
411 * (which really amounts to a PCH but no South Display).
412 */
413 if (INTEL_INFO(dev)->num_pipes == 0) {
414 dev_priv->pch_type = PCH_NOP;
415 return;
416 }
417
418 /*
419 * The reason to probe the ISA bridge instead of Dev31:Fun0 is to
420 * make graphics device passthrough work easy for VMMs, which only
421 * need to expose the ISA bridge to let the driver know the real
422 * hardware underneath. This is a requirement from the virtualization team.
423 *
424 * In some virtualized environments (e.g. XEN), there is an irrelevant
425 * ISA bridge in the system. To work reliably, we should scan through
426 * all the ISA bridge devices and check for the first match, instead
427 * of only checking the first one.
428 */
429 while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
430 if (pch->vendor == PCI_VENDOR_ID_INTEL) {
431 unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
432 dev_priv->pch_id = id;
433
434 if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
435 dev_priv->pch_type = PCH_IBX;
436 DRM_DEBUG_KMS("Found Ibex Peak PCH\n");
437 WARN_ON(!IS_GEN5(dev));
438 } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) {
439 dev_priv->pch_type = PCH_CPT;
440 DRM_DEBUG_KMS("Found CougarPoint PCH\n");
441 WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
442 } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) {
443 /* PantherPoint is CPT compatible */
444 dev_priv->pch_type = PCH_CPT;
445 DRM_DEBUG_KMS("Found PantherPoint PCH\n");
446 WARN_ON(!(IS_GEN6(dev) || IS_IVYBRIDGE(dev)));
447 } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
448 dev_priv->pch_type = PCH_LPT;
449 DRM_DEBUG_KMS("Found LynxPoint PCH\n");
450 WARN_ON(!IS_HASWELL(dev));
451 WARN_ON(IS_ULT(dev));
452 } else if (IS_BROADWELL(dev)) {
453 dev_priv->pch_type = PCH_LPT;
454 dev_priv->pch_id =
455 INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
456 DRM_DEBUG_KMS("This is Broadwell, assuming "
457 "LynxPoint LP PCH\n");
458 } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
459 dev_priv->pch_type = PCH_LPT;
460 DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
461 WARN_ON(!IS_HASWELL(dev));
462 WARN_ON(!IS_ULT(dev));
463 } else
464 continue;
465
466 break;
467 }
468 }
469 if (!pch)
470 DRM_DEBUG_KMS("No PCH found.\n");
471
472 pci_dev_put(pch);
473 }
474
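/*
 * i915_semaphore_is_enabled - report whether inter-ring hardware semaphores
 * should be used. The i915.semaphores module parameter overrides the
 * per-platform defaults below when set to 0 or 1 (e.g. booting with
 * i915.semaphores=0 forces them off).
 */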
475 bool i915_semaphore_is_enabled(struct drm_device *dev)
476 {
477 if (INTEL_INFO(dev)->gen < 6)
478 return false;
479
480 if (i915.semaphores >= 0)
481 return i915.semaphores;
482
483 /* Until we get further testing... */
484 if (IS_GEN8(dev))
485 return false;
486
487 #ifdef CONFIG_INTEL_IOMMU
488 /* Enable semaphores on SNB when IO remapping is off */
489 if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
490 return false;
491 #endif
492
493 return true;
494 }
495
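/*
 * i915_drm_freeze - common suspend/freeze path: block lid events, idle the
 * GPU, uninstall interrupts, shut down the CRTCs (preserving software state
 * for thaw), save register state and suspend the fbdev console.
 */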
496 static int i915_drm_freeze(struct drm_device *dev)
497 {
498 struct drm_i915_private *dev_priv = dev->dev_private;
499 struct drm_crtc *crtc;
500
501 intel_runtime_pm_get(dev_priv);
502
503 /* ignore lid events during suspend */
504 mutex_lock(&dev_priv->modeset_restore_lock);
505 dev_priv->modeset_restore = MODESET_SUSPENDED;
506 mutex_unlock(&dev_priv->modeset_restore_lock);
507
508 /* We do a lot of poking into a lot of registers, so make sure they
509 * all work properly. */
510 intel_display_set_init_power(dev_priv, true);
511
512 drm_kms_helper_poll_disable(dev);
513
514 pci_save_state(dev->pdev);
515
516 /* If KMS is active, we do the leavevt stuff here */
517 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
518 int error;
519
520 error = i915_gem_suspend(dev);
521 if (error) {
522 dev_err(&dev->pdev->dev,
523 "GEM idle failed, resume might fail\n");
524 return error;
525 }
526
527 drm_irq_uninstall(dev);
528 dev_priv->enable_hotplug_processing = false;
529
530 intel_disable_gt_powersave(dev);
531
532 /*
533 * Disable CRTCs directly since we want to preserve sw state
534 * for _thaw.
535 */
536 drm_modeset_lock_all(dev);
537 for_each_crtc(dev, crtc) {
538 dev_priv->display.crtc_disable(crtc);
539 }
540 drm_modeset_unlock_all(dev);
541
542 intel_modeset_suspend_hw(dev);
543 }
544
545 i915_gem_suspend_gtt_mappings(dev);
546
547 i915_save_state(dev);
548
549 intel_opregion_fini(dev);
550 intel_uncore_fini(dev);
551
552 console_lock();
553 intel_fbdev_set_suspend(dev, FBINFO_STATE_SUSPENDED);
554 console_unlock();
555
556 dev_priv->suspend_count++;
557
558 return 0;
559 }
560
561 int i915_suspend(struct drm_device *dev, pm_message_t state)
562 {
563 int error;
564
565 if (!dev || !dev->dev_private) {
566 DRM_ERROR("dev: %p\n", dev);
567 DRM_ERROR("DRM not initialized, aborting suspend.\n");
568 return -ENODEV;
569 }
570
571 if (state.event == PM_EVENT_PRETHAW)
572 return 0;
573
574
575 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
576 return 0;
577
578 error = i915_drm_freeze(dev);
579 if (error)
580 return error;
581
582 if (state.event == PM_EVENT_SUSPEND) {
583 /* Shut down the device */
584 pci_disable_device(dev->pdev);
585 pci_set_power_state(dev->pdev, PCI_D3hot);
586 }
587
588 return 0;
589 }
590
591 void intel_console_resume(struct work_struct *work)
592 {
593 struct drm_i915_private *dev_priv =
594 container_of(work, struct drm_i915_private,
595 console_resume_work);
596 struct drm_device *dev = dev_priv->dev;
597
598 console_lock();
599 intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
600 console_unlock();
601 }
602
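/*
 * i915_drm_thaw_early - early resume/thaw step: sanitize the uncore (MMIO)
 * state and re-enable the HW power domains before the main thaw work runs.
 */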
603 static int i915_drm_thaw_early(struct drm_device *dev)
604 {
605 struct drm_i915_private *dev_priv = dev->dev_private;
606
607 intel_uncore_early_sanitize(dev);
608 intel_uncore_sanitize(dev);
609 intel_power_domains_init_hw(dev_priv);
610
611 return 0;
612 }
613
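/*
 * __i915_drm_thaw - main resume/thaw path: optionally restore GTT mappings,
 * restore saved register state, re-initialize the GEM hardware (marking the
 * GPU as wedged on failure), reinstall interrupts and bring the modeset and
 * hotplug machinery back up.
 */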
614 static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
615 {
616 struct drm_i915_private *dev_priv = dev->dev_private;
617
618 if (drm_core_check_feature(dev, DRIVER_MODESET) &&
619 restore_gtt_mappings) {
620 mutex_lock(&dev->struct_mutex);
621 i915_gem_restore_gtt_mappings(dev);
622 mutex_unlock(&dev->struct_mutex);
623 }
624
625 i915_restore_state(dev);
626 intel_opregion_setup(dev);
627
628 /* KMS EnterVT equivalent */
629 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
630 intel_init_pch_refclk(dev);
631 drm_mode_config_reset(dev);
632
633 mutex_lock(&dev->struct_mutex);
634 if (i915_gem_init_hw(dev)) {
635 DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
636 atomic_set_mask(I915_WEDGED, &dev_priv->gpu_error.reset_counter);
637 }
638 mutex_unlock(&dev->struct_mutex);
639
640 /* We need working interrupts for modeset enabling ... */
641 drm_irq_install(dev, dev->pdev->irq);
642
643 intel_modeset_init_hw(dev);
644
645 drm_modeset_lock_all(dev);
646 intel_modeset_setup_hw_state(dev, true);
647 drm_modeset_unlock_all(dev);
648
649 /*
650 * ... but also need to make sure that hotplug processing
651 * doesn't cause havoc. Like in the driver load code we don't
652 * bother with the tiny race here where we might lose hotplug
653 * notifications.
654 */
655 intel_hpd_init(dev);
656 dev_priv->enable_hotplug_processing = true;
657 /* Config may have changed between suspend and resume */
658 drm_helper_hpd_irq_event(dev);
659 }
660
661 intel_opregion_init(dev);
662
663 /*
664 * The console lock can be pretty contended on resume due
665 * to all the printk activity. Try to keep it out of the hot
666 * path of resume if possible.
667 */
668 if (console_trylock()) {
669 intel_fbdev_set_suspend(dev, FBINFO_STATE_RUNNING);
670 console_unlock();
671 } else {
672 schedule_work(&dev_priv->console_resume_work);
673 }
674
675 mutex_lock(&dev_priv->modeset_restore_lock);
676 dev_priv->modeset_restore = MODESET_DONE;
677 mutex_unlock(&dev_priv->modeset_restore_lock);
678
679 intel_runtime_pm_put(dev_priv);
680 return 0;
681 }
682
683 static int i915_drm_thaw(struct drm_device *dev)
684 {
685 if (drm_core_check_feature(dev, DRIVER_MODESET))
686 i915_check_and_clear_faults(dev);
687
688 return __i915_drm_thaw(dev, true);
689 }
690
691 static int i915_resume_early(struct drm_device *dev)
692 {
693 if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
694 return 0;
695
696 /*
697 * We have a resume ordering issue with the snd-hda driver also
698 * requiring our device to be powered up. Due to the lack of a
699 * parent/child relationship we currently solve this with an early
700 * resume hook.
701 *
702 * FIXME: This should be solved with a special hdmi sink device or
703 * similar so that power domains can be employed.
704 */
705 if (pci_enable_device(dev->pdev))
706 return -EIO;
707
708 pci_set_master(dev->pdev);
709
710 return i915_drm_thaw_early(dev);
711 }
712
713 int i915_resume(struct drm_device *dev)
714 {
715 struct drm_i915_private *dev_priv = dev->dev_private;
716 int ret;
717
718 /*
719 * Platforms with opregion should have sane BIOS, older ones (gen3 and
720 * earlier) need to restore the GTT mappings since the BIOS might clear
721 * all our scratch PTEs.
722 */
723 ret = __i915_drm_thaw(dev, !dev_priv->opregion.header);
724 if (ret)
725 return ret;
726
727 drm_kms_helper_poll_enable(dev);
728 return 0;
729 }
730
731 static int i915_resume_legacy(struct drm_device *dev)
732 {
733 i915_resume_early(dev);
734 i915_resume(dev);
735
736 return 0;
737 }
738
739 /**
740 * i915_reset - reset chip after a hang
741 * @dev: drm device to reset
742 *
743 * Reset the chip. Useful if a hang is detected. Returns zero on successful
744 * reset or otherwise an error code.
745 *
746 * Procedure is fairly simple:
747 * - reset the chip using the reset reg
748 * - re-init context state
749 * - re-init hardware status page
750 * - re-init ring buffer
751 * - re-init interrupt state
752 * - re-init display
753 */
754 int i915_reset(struct drm_device *dev)
755 {
756 struct drm_i915_private *dev_priv = dev->dev_private;
757 bool simulated;
758 int ret;
759
760 if (!i915.reset)
761 return 0;
762
763 mutex_lock(&dev->struct_mutex);
764
765 i915_gem_reset(dev);
766
767 simulated = dev_priv->gpu_error.stop_rings != 0;
768
769 ret = intel_gpu_reset(dev);
770
771 /* Also reset the gpu hangman. */
772 if (simulated) {
773 DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
774 dev_priv->gpu_error.stop_rings = 0;
775 if (ret == -ENODEV) {
776 DRM_INFO("Reset not implemented, but ignoring "
777 "error for simulated gpu hangs\n");
778 ret = 0;
779 }
780 }
781
782 if (ret) {
783 DRM_ERROR("Failed to reset chip: %i\n", ret);
784 mutex_unlock(&dev->struct_mutex);
785 return ret;
786 }
787
788 /* Ok, now get things going again... */
789
790 /*
791 * Everything depends on having the GTT running, so we need to start
792 * there. Fortunately we don't need to do this unless we reset the
793 * chip at a PCI level.
794 *
795 * Next we need to restore the context, but we don't use those
796 * yet either...
797 *
798 * Ring buffer needs to be re-initialized in the KMS case, or if X
799 * was running at the time of the reset (i.e. we weren't VT
800 * switched away).
801 */
802 if (drm_core_check_feature(dev, DRIVER_MODESET) ||
803 !dev_priv->ums.mm_suspended) {
804 dev_priv->ums.mm_suspended = 0;
805
806 ret = i915_gem_init_hw(dev);
807 mutex_unlock(&dev->struct_mutex);
808 if (ret) {
809 DRM_ERROR("Failed hw init on reset %d\n", ret);
810 return ret;
811 }
812
813 /*
814 * FIXME: This races pretty badly against concurrent holders of
815 * ring interrupts. This is possible since we've started to drop
816 * dev->struct_mutex in select places when waiting for the gpu.
817 */
818
819 /*
820 * rps/rc6 re-init is necessary to restore state lost after the
821 * reset and the re-install of gt irqs. Skip for ironlake per
822 * previous concerns that it doesn't respond well to some forms
823 * of re-init after reset.
824 */
825 if (INTEL_INFO(dev)->gen > 5)
826 intel_reset_gt_powersave(dev);
827
828 intel_hpd_init(dev);
829 } else {
830 mutex_unlock(&dev->struct_mutex);
831 }
832
833 return 0;
834 }
835
836 static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
837 {
838 struct intel_device_info *intel_info =
839 (struct intel_device_info *) ent->driver_data;
840
841 if (IS_PRELIMINARY_HW(intel_info) && !i915.preliminary_hw_support) {
842 DRM_INFO("This hardware requires preliminary hardware support.\n"
843 "See CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT, and/or modparam preliminary_hw_support\n");
844 return -ENODEV;
845 }
846
847 /* Only bind to function 0 of the device. Early generations
848 * used function 1 as a placeholder for multi-head. This causes
849 * us confusion instead, especially on the systems where both
850 * functions have the same PCI-ID!
851 */
852 if (PCI_FUNC(pdev->devfn))
853 return -ENODEV;
854
855 driver.driver_features &= ~(DRIVER_USE_AGP);
856
857 return drm_get_pci_dev(pdev, ent, &driver);
858 }
859
860 static void
861 i915_pci_remove(struct pci_dev *pdev)
862 {
863 struct drm_device *dev = pci_get_drvdata(pdev);
864
865 drm_put_dev(dev);
866 }
867
868 static int i915_pm_suspend(struct device *dev)
869 {
870 struct pci_dev *pdev = to_pci_dev(dev);
871 struct drm_device *drm_dev = pci_get_drvdata(pdev);
872
873 if (!drm_dev || !drm_dev->dev_private) {
874 dev_err(dev, "DRM not initialized, aborting suspend.\n");
875 return -ENODEV;
876 }
877
878 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
879 return 0;
880
881 return i915_drm_freeze(drm_dev);
882 }
883
884 static int i915_pm_suspend_late(struct device *dev)
885 {
886 struct pci_dev *pdev = to_pci_dev(dev);
887 struct drm_device *drm_dev = pci_get_drvdata(pdev);
888
889 /*
890 * We have a suspend ordering issue with the snd-hda driver also
891 * requiring our device to be powered up. Due to the lack of a
892 * parent/child relationship we currently solve this with a late
893 * suspend hook.
894 *
895 * FIXME: This should be solved with a special hdmi sink device or
896 * similar so that power domains can be employed.
897 */
898 if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF)
899 return 0;
900
901 pci_disable_device(pdev);
902 pci_set_power_state(pdev, PCI_D3hot);
903
904 return 0;
905 }
906
907 static int i915_pm_resume_early(struct device *dev)
908 {
909 struct pci_dev *pdev = to_pci_dev(dev);
910 struct drm_device *drm_dev = pci_get_drvdata(pdev);
911
912 return i915_resume_early(drm_dev);
913 }
914
915 static int i915_pm_resume(struct device *dev)
916 {
917 struct pci_dev *pdev = to_pci_dev(dev);
918 struct drm_device *drm_dev = pci_get_drvdata(pdev);
919
920 return i915_resume(drm_dev);
921 }
922
923 static int i915_pm_freeze(struct device *dev)
924 {
925 struct pci_dev *pdev = to_pci_dev(dev);
926 struct drm_device *drm_dev = pci_get_drvdata(pdev);
927
928 if (!drm_dev || !drm_dev->dev_private) {
929 dev_err(dev, "DRM not initialized, aborting suspend.\n");
930 return -ENODEV;
931 }
932
933 return i915_drm_freeze(drm_dev);
934 }
935
936 static int i915_pm_thaw_early(struct device *dev)
937 {
938 struct pci_dev *pdev = to_pci_dev(dev);
939 struct drm_device *drm_dev = pci_get_drvdata(pdev);
940
941 return i915_drm_thaw_early(drm_dev);
942 }
943
944 static int i915_pm_thaw(struct device *dev)
945 {
946 struct pci_dev *pdev = to_pci_dev(dev);
947 struct drm_device *drm_dev = pci_get_drvdata(pdev);
948
949 return i915_drm_thaw(drm_dev);
950 }
951
952 static int i915_pm_poweroff(struct device *dev)
953 {
954 struct pci_dev *pdev = to_pci_dev(dev);
955 struct drm_device *drm_dev = pci_get_drvdata(pdev);
956
957 return i915_drm_freeze(drm_dev);
958 }
959
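/* HSW/BDW runtime PM simply enters/leaves the PC8 power state. */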
960 static int hsw_runtime_suspend(struct drm_i915_private *dev_priv)
961 {
962 hsw_enable_pc8(dev_priv);
963
964 return 0;
965 }
966
967 static int snb_runtime_resume(struct drm_i915_private *dev_priv)
968 {
969 struct drm_device *dev = dev_priv->dev;
970
971 intel_init_pch_refclk(dev);
972
973 return 0;
974 }
975
976 static int hsw_runtime_resume(struct drm_i915_private *dev_priv)
977 {
978 hsw_disable_pc8(dev_priv);
979
980 return 0;
981 }
982
983 /*
984 * Save all Gunit registers that may be lost after a D3 and a subsequent
985 * S0i[R123] transition. The list of registers needing a save/restore is
986 * defined in the VLV2_S0IXRegs document. This document marks all Gunit
987 * registers in the following way:
988 * - Driver: saved/restored by the driver
989 * - Punit : saved/restored by the Punit firmware
990 * - No, w/o marking: no need to save/restore, since the register is R/O or
991 * used internally by the HW in a way that doesn't depend on
992 * keeping the content across a suspend/resume.
993 * - Debug : used for debugging
994 *
995 * We save/restore all registers marked with 'Driver', with the following
996 * exceptions:
997 * - Registers out of use, including also registers marked with 'Debug'.
998 * These have no effect on the driver's operation, so we don't save/restore
999 * them to reduce the overhead.
1000 * - Registers that are fully setup by an initialization function called from
1001 * the resume path. For example many clock gating and RPS/RC6 registers.
1002 * - Registers that provide the right functionality with their reset defaults.
1003 *
1004 * TODO: Except for registers that, based on the above 3 criteria, can be safely
1005 * ignored, we save/restore all others, practically treating the HW context as
1006 * a black-box for the driver. Further investigation is needed to reduce the
1007 * saved/restored registers even further, by following the same 3 criteria.
1008 */
1009 static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
1010 {
1011 struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
1012 int i;
1013
1014 /* GAM 0x4000-0x4770 */
1015 s->wr_watermark = I915_READ(GEN7_WR_WATERMARK);
1016 s->gfx_prio_ctrl = I915_READ(GEN7_GFX_PRIO_CTRL);
1017 s->arb_mode = I915_READ(ARB_MODE);
1018 s->gfx_pend_tlb0 = I915_READ(GEN7_GFX_PEND_TLB0);
1019 s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1);
1020
1021 for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
1022 s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS_BASE + i * 4);
1023
1024 s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
1025 s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT);
1026
1027 s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7);
1028 s->ecochk = I915_READ(GAM_ECOCHK);
1029 s->bsd_hwsp = I915_READ(BSD_HWS_PGA_GEN7);
1030 s->blt_hwsp = I915_READ(BLT_HWS_PGA_GEN7);
1031
1032 s->tlb_rd_addr = I915_READ(GEN7_TLB_RD_ADDR);
1033
1034 /* MBC 0x9024-0x91D0, 0x8500 */
1035 s->g3dctl = I915_READ(VLV_G3DCTL);
1036 s->gsckgctl = I915_READ(VLV_GSCKGCTL);
1037 s->mbctl = I915_READ(GEN6_MBCTL);
1038
1039 /* GCP 0x9400-0x9424, 0x8100-0x810C */
1040 s->ucgctl1 = I915_READ(GEN6_UCGCTL1);
1041 s->ucgctl3 = I915_READ(GEN6_UCGCTL3);
1042 s->rcgctl1 = I915_READ(GEN6_RCGCTL1);
1043 s->rcgctl2 = I915_READ(GEN6_RCGCTL2);
1044 s->rstctl = I915_READ(GEN6_RSTCTL);
1045 s->misccpctl = I915_READ(GEN7_MISCCPCTL);
1046
1047 /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
1048 s->gfxpause = I915_READ(GEN6_GFXPAUSE);
1049 s->rpdeuhwtc = I915_READ(GEN6_RPDEUHWTC);
1050 s->rpdeuc = I915_READ(GEN6_RPDEUC);
1051 s->ecobus = I915_READ(ECOBUS);
1052 s->pwrdwnupctl = I915_READ(VLV_PWRDWNUPCTL);
1053 s->rp_down_timeout = I915_READ(GEN6_RP_DOWN_TIMEOUT);
1054 s->rp_deucsw = I915_READ(GEN6_RPDEUCSW);
1055 s->rcubmabdtmr = I915_READ(GEN6_RCUBMABDTMR);
1056 s->rcedata = I915_READ(VLV_RCEDATA);
1057 s->spare2gh = I915_READ(VLV_SPAREG2H);
1058
1059 /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
1060 s->gt_imr = I915_READ(GTIMR);
1061 s->gt_ier = I915_READ(GTIER);
1062 s->pm_imr = I915_READ(GEN6_PMIMR);
1063 s->pm_ier = I915_READ(GEN6_PMIER);
1064
1065 for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
1066 s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH_BASE + i * 4);
1067
1068 /* GT SA CZ domain, 0x100000-0x138124 */
1069 s->tilectl = I915_READ(TILECTL);
1070 s->gt_fifoctl = I915_READ(GTFIFOCTL);
1071 s->gtlc_wake_ctrl = I915_READ(VLV_GTLC_WAKE_CTRL);
1072 s->gtlc_survive = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
1073 s->pmwgicz = I915_READ(VLV_PMWGICZ);
1074
1075 /* Gunit-Display CZ domain, 0x182028-0x1821CF */
1076 s->gu_ctl0 = I915_READ(VLV_GU_CTL0);
1077 s->gu_ctl1 = I915_READ(VLV_GU_CTL1);
1078 s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2);
1079
1080 /*
1081 * Not saving any of:
1082 * DFT, 0x9800-0x9EC0
1083 * SARB, 0xB000-0xB1FC
1084 * GAC, 0x5208-0x524C, 0x14000-0x14C000
1085 * PCI CFG
1086 */
1087 }
1088
1089 static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
1090 {
1091 struct vlv_s0ix_state *s = &dev_priv->vlv_s0ix_state;
1092 u32 val;
1093 int i;
1094
1095 /* GAM 0x4000-0x4770 */
1096 I915_WRITE(GEN7_WR_WATERMARK, s->wr_watermark);
1097 I915_WRITE(GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl);
1098 I915_WRITE(ARB_MODE, s->arb_mode | (0xffff << 16));
1099 I915_WRITE(GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0);
1100 I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1);
1101
1102 for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
1103 I915_WRITE(GEN7_LRA_LIMITS_BASE + i * 4, s->lra_limits[i]);
1104
1105 I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
1106 I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
1107
1108 I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp);
1109 I915_WRITE(GAM_ECOCHK, s->ecochk);
1110 I915_WRITE(BSD_HWS_PGA_GEN7, s->bsd_hwsp);
1111 I915_WRITE(BLT_HWS_PGA_GEN7, s->blt_hwsp);
1112
1113 I915_WRITE(GEN7_TLB_RD_ADDR, s->tlb_rd_addr);
1114
1115 /* MBC 0x9024-0x91D0, 0x8500 */
1116 I915_WRITE(VLV_G3DCTL, s->g3dctl);
1117 I915_WRITE(VLV_GSCKGCTL, s->gsckgctl);
1118 I915_WRITE(GEN6_MBCTL, s->mbctl);
1119
1120 /* GCP 0x9400-0x9424, 0x8100-0x810C */
1121 I915_WRITE(GEN6_UCGCTL1, s->ucgctl1);
1122 I915_WRITE(GEN6_UCGCTL3, s->ucgctl3);
1123 I915_WRITE(GEN6_RCGCTL1, s->rcgctl1);
1124 I915_WRITE(GEN6_RCGCTL2, s->rcgctl2);
1125 I915_WRITE(GEN6_RSTCTL, s->rstctl);
1126 I915_WRITE(GEN7_MISCCPCTL, s->misccpctl);
1127
1128 /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
1129 I915_WRITE(GEN6_GFXPAUSE, s->gfxpause);
1130 I915_WRITE(GEN6_RPDEUHWTC, s->rpdeuhwtc);
1131 I915_WRITE(GEN6_RPDEUC, s->rpdeuc);
1132 I915_WRITE(ECOBUS, s->ecobus);
1133 I915_WRITE(VLV_PWRDWNUPCTL, s->pwrdwnupctl);
1134 I915_WRITE(GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout);
1135 I915_WRITE(GEN6_RPDEUCSW, s->rp_deucsw);
1136 I915_WRITE(GEN6_RCUBMABDTMR, s->rcubmabdtmr);
1137 I915_WRITE(VLV_RCEDATA, s->rcedata);
1138 I915_WRITE(VLV_SPAREG2H, s->spare2gh);
1139
1140 /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
1141 I915_WRITE(GTIMR, s->gt_imr);
1142 I915_WRITE(GTIER, s->gt_ier);
1143 I915_WRITE(GEN6_PMIMR, s->pm_imr);
1144 I915_WRITE(GEN6_PMIER, s->pm_ier);
1145
1146 for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
1147 I915_WRITE(GEN7_GT_SCRATCH_BASE + i * 4, s->gt_scratch[i]);
1148
1149 /* GT SA CZ domain, 0x100000-0x138124 */
1150 I915_WRITE(TILECTL, s->tilectl);
1151 I915_WRITE(GTFIFOCTL, s->gt_fifoctl);
1152 /*
1153 * Preserve the GT allow wake and GFX force clock bit; they are not
1154 * restored here, as they are used to control the s0ix suspend/resume
1155 * sequence by the caller.
1156 */
1157 val = I915_READ(VLV_GTLC_WAKE_CTRL);
1158 val &= VLV_GTLC_ALLOWWAKEREQ;
1159 val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
1160 I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
1161
1162 val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
1163 val &= VLV_GFX_CLK_FORCE_ON_BIT;
1164 val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
1165 I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
1166
1167 I915_WRITE(VLV_PMWGICZ, s->pmwgicz);
1168
1169 /* Gunit-Display CZ domain, 0x182028-0x1821CF */
1170 I915_WRITE(VLV_GU_CTL0, s->gu_ctl0);
1171 I915_WRITE(VLV_GU_CTL1, s->gu_ctl1);
1172 I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2);
1173 }
1174
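/*
 * vlv_force_gfx_clock - force the VLV graphics clock on or off via the
 * Gunit survivability register, waiting for the clock status bit to settle.
 * Used to guarantee register access while the Gunit state is saved/restored.
 */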
1175 int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
1176 {
1177 u32 val;
1178 int err;
1179
1180 val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
1181 WARN_ON(!!(val & VLV_GFX_CLK_FORCE_ON_BIT) == force_on);
1182
1183 #define COND (I915_READ(VLV_GTLC_SURVIVABILITY_REG) & VLV_GFX_CLK_STATUS_BIT)
1184 /* Wait for a previous force-off to settle */
1185 if (force_on) {
1186 err = wait_for(!COND, 20);
1187 if (err) {
1188 DRM_ERROR("timeout waiting for GFX clock force-off (%08x)\n",
1189 I915_READ(VLV_GTLC_SURVIVABILITY_REG));
1190 return err;
1191 }
1192 }
1193
1194 val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
1195 val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
1196 if (force_on)
1197 val |= VLV_GFX_CLK_FORCE_ON_BIT;
1198 I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
1199
1200 if (!force_on)
1201 return 0;
1202
1203 err = wait_for(COND, 20);
1204 if (err)
1205 DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
1206 I915_READ(VLV_GTLC_SURVIVABILITY_REG));
1207
1208 return err;
1209 #undef COND
1210 }
1211
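/*
 * vlv_allow_gt_wake - allow or disallow GT wake requests and wait for the
 * change to be acknowledged in VLV_GTLC_PW_STATUS.
 */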
1212 static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
1213 {
1214 u32 val;
1215 int err = 0;
1216
1217 val = I915_READ(VLV_GTLC_WAKE_CTRL);
1218 val &= ~VLV_GTLC_ALLOWWAKEREQ;
1219 if (allow)
1220 val |= VLV_GTLC_ALLOWWAKEREQ;
1221 I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
1222 POSTING_READ(VLV_GTLC_WAKE_CTRL);
1223
1224 #define COND (!!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEACK) == \
1225 allow)
1226 err = wait_for(COND, 1);
1227 if (err)
1228 DRM_ERROR("timeout %s GT wake\n", allow ? "allowing" : "disallowing");
1229 return err;
1230 #undef COND
1231 }
1232
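/*
 * vlv_wait_for_gt_wells - wait for the render and media power wells to
 * reach the requested on/off state; RC6 transitions may take a few msec.
 */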
1233 static int vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
1234 bool wait_for_on)
1235 {
1236 u32 mask;
1237 u32 val;
1238 int err;
1239
1240 mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
1241 val = wait_for_on ? mask : 0;
1242 #define COND ((I915_READ(VLV_GTLC_PW_STATUS) & mask) == val)
1243 if (COND)
1244 return 0;
1245
1246 DRM_DEBUG_KMS("waiting for GT wells to go %s (%08x)\n",
1247 wait_for_on ? "on" : "off",
1248 I915_READ(VLV_GTLC_PW_STATUS));
1249
1250 /*
1251 * RC6 transitioning can be delayed up to 2 msec (see
1252 * valleyview_enable_rps), use 3 msec for safety.
1253 */
1254 err = wait_for(COND, 3);
1255 if (err)
1256 DRM_ERROR("timeout waiting for GT wells to go %s\n",
1257 wait_for_on ? "on" : "off");
1258
1259 return err;
1260 #undef COND
1261 }
1262
1263 static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
1264 {
1265 if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
1266 return;
1267
1268 DRM_ERROR("GT register access while GT waking disabled\n");
1269 I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
1270 }
1271
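/*
 * vlv_runtime_suspend - enter S0ix: force the gfx clock on so registers can
 * be saved, disallow GT wake, save the Gunit state and finally release the
 * clock forcing. Errors unwind to a safe (awake, unforced) state.
 */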
1272 static int vlv_runtime_suspend(struct drm_i915_private *dev_priv)
1273 {
1274 u32 mask;
1275 int err;
1276
1277 /*
1278 * Bspec defines the following GT well on flags as debug only, so
1279 * don't treat them as hard failures.
1280 */
1281 (void)vlv_wait_for_gt_wells(dev_priv, false);
1282
1283 mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
1284 WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);
1285
1286 vlv_check_no_gt_access(dev_priv);
1287
1288 err = vlv_force_gfx_clock(dev_priv, true);
1289 if (err)
1290 goto err1;
1291
1292 err = vlv_allow_gt_wake(dev_priv, false);
1293 if (err)
1294 goto err2;
1295 vlv_save_gunit_s0ix_state(dev_priv);
1296
1297 err = vlv_force_gfx_clock(dev_priv, false);
1298 if (err)
1299 goto err2;
1300
1301 return 0;
1302
1303 err2:
1304 /* For safety always re-enable waking and disable gfx clock forcing */
1305 vlv_allow_gt_wake(dev_priv, true);
1306 err1:
1307 vlv_force_gfx_clock(dev_priv, false);
1308
1309 return err;
1310 }
1311
1312 static int vlv_runtime_resume(struct drm_i915_private *dev_priv)
1313 {
1314 struct drm_device *dev = dev_priv->dev;
1315 int err;
1316 int ret;
1317
1318 /*
1319 * If any of the steps fail just try to continue, that's the best we
1320 * can do at this point. Return the first error code (which will also
1321 * leave RPM permanently disabled).
1322 */
1323 ret = vlv_force_gfx_clock(dev_priv, true);
1324
1325 vlv_restore_gunit_s0ix_state(dev_priv);
1326
1327 err = vlv_allow_gt_wake(dev_priv, true);
1328 if (!ret)
1329 ret = err;
1330
1331 err = vlv_force_gfx_clock(dev_priv, false);
1332 if (!ret)
1333 ret = err;
1334
1335 vlv_check_no_gt_access(dev_priv);
1336
1337 intel_init_clock_gating(dev);
1338 i915_gem_restore_fences(dev);
1339
1340 return ret;
1341 }
1342
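/*
 * intel_runtime_suspend - runtime PM suspend callback: quiesce GEM mmaps and
 * RPS work, disable interrupts and hand off to the platform specific suspend
 * routine above. Returns -EAGAIN if struct_mutex is contended so that the
 * suspend gets rescheduled.
 */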
1343 static int intel_runtime_suspend(struct device *device)
1344 {
1345 struct pci_dev *pdev = to_pci_dev(device);
1346 struct drm_device *dev = pci_get_drvdata(pdev);
1347 struct drm_i915_private *dev_priv = dev->dev_private;
1348 int ret;
1349
1350 if (WARN_ON_ONCE(!(dev_priv->rps.enabled && intel_enable_rc6(dev))))
1351 return -ENODEV;
1352
1353 WARN_ON(!HAS_RUNTIME_PM(dev));
1354 assert_force_wake_inactive(dev_priv);
1355
1356 DRM_DEBUG_KMS("Suspending device\n");
1357
1358 /*
1359 * We could deadlock here in case another thread holding struct_mutex
1360 * calls RPM suspend concurrently, since the RPM suspend will wait
1361 * first for this RPM suspend to finish. In this case the concurrent
1362 * RPM resume will be followed by its RPM suspend counterpart. Still
1363 * for consistency return -EAGAIN, which will reschedule this suspend.
1364 */
1365 if (!mutex_trylock(&dev->struct_mutex)) {
1366 DRM_DEBUG_KMS("device lock contention, deferring suspend\n");
1367 /*
1368 * Bump the expiration timestamp, otherwise the suspend won't
1369 * be rescheduled.
1370 */
1371 pm_runtime_mark_last_busy(device);
1372
1373 return -EAGAIN;
1374 }
1375 /*
1376 * We are safe here against re-faults, since the fault handler takes
1377 * an RPM reference.
1378 */
1379 i915_gem_release_all_mmaps(dev_priv);
1380 mutex_unlock(&dev->struct_mutex);
1381
1382 /*
1383 * rps.work can't be rearmed here, since we get here only after making
1384 * sure the GPU is idle and the RPS freq is set to the minimum. See
1385 * intel_mark_idle().
1386 */
1387 cancel_work_sync(&dev_priv->rps.work);
1388 intel_runtime_pm_disable_interrupts(dev);
1389
1390 if (IS_GEN6(dev)) {
1391 ret = 0;
1392 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
1393 ret = hsw_runtime_suspend(dev_priv);
1394 } else if (IS_VALLEYVIEW(dev)) {
1395 ret = vlv_runtime_suspend(dev_priv);
1396 } else {
1397 ret = -ENODEV;
1398 WARN_ON(1);
1399 }
1400
1401 if (ret) {
1402 DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
1403 intel_runtime_pm_restore_interrupts(dev);
1404
1405 return ret;
1406 }
1407
1408 del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
1409 dev_priv->pm.suspended = true;
1410
1411 /*
1412 * current versions of firmware which depend on this opregion
1413 * notification have repurposed the D1 definition to mean
1414 * "runtime suspended" vs. what you would normally expect (D3)
1415 * to distinguish it from notifications that might be sent
1416 * via the suspend path.
1417 */
1418 intel_opregion_notify_adapter(dev, PCI_D1);
1419
1420 DRM_DEBUG_KMS("Device suspended\n");
1421 return 0;
1422 }
1423
1424 static int intel_runtime_resume(struct device *device)
1425 {
1426 struct pci_dev *pdev = to_pci_dev(device);
1427 struct drm_device *dev = pci_get_drvdata(pdev);
1428 struct drm_i915_private *dev_priv = dev->dev_private;
1429 int ret;
1430
1431 WARN_ON(!HAS_RUNTIME_PM(dev));
1432
1433 DRM_DEBUG_KMS("Resuming device\n");
1434
1435 intel_opregion_notify_adapter(dev, PCI_D0);
1436 dev_priv->pm.suspended = false;
1437
1438 if (IS_GEN6(dev)) {
1439 ret = snb_runtime_resume(dev_priv);
1440 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
1441 ret = hsw_runtime_resume(dev_priv);
1442 } else if (IS_VALLEYVIEW(dev)) {
1443 ret = vlv_runtime_resume(dev_priv);
1444 } else {
1445 WARN_ON(1);
1446 ret = -ENODEV;
1447 }
1448
1449 /*
1450 * No point in rolling back things in case of an error, as the best
1451 * we can do is to hope that things will still work (and disable RPM).
1452 */
1453 i915_gem_init_swizzling(dev);
1454 gen6_update_ring_freq(dev);
1455
1456 intel_runtime_pm_restore_interrupts(dev);
1457 intel_reset_gt_powersave(dev);
1458
1459 if (ret)
1460 DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
1461 else
1462 DRM_DEBUG_KMS("Device resumed\n");
1463
1464 return ret;
1465 }
1466
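/*
 * Dispatch table for the system-wide and runtime PM callbacks; the S4
 * (hibernate) hooks reuse the S3 suspend/resume implementations above.
 */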
1467 static const struct dev_pm_ops i915_pm_ops = {
1468 .suspend = i915_pm_suspend,
1469 .suspend_late = i915_pm_suspend_late,
1470 .resume_early = i915_pm_resume_early,
1471 .resume = i915_pm_resume,
1472 .freeze = i915_pm_freeze,
1473 .thaw_early = i915_pm_thaw_early,
1474 .thaw = i915_pm_thaw,
1475 .poweroff = i915_pm_poweroff,
1476 .restore_early = i915_pm_resume_early,
1477 .restore = i915_pm_resume,
1478 .runtime_suspend = intel_runtime_suspend,
1479 .runtime_resume = intel_runtime_resume,
1480 };
1481
1482 static const struct vm_operations_struct i915_gem_vm_ops = {
1483 .fault = i915_gem_fault,
1484 .open = drm_gem_vm_open,
1485 .close = drm_gem_vm_close,
1486 };
1487
1488 static const struct file_operations i915_driver_fops = {
1489 .owner = THIS_MODULE,
1490 .open = drm_open,
1491 .release = drm_release,
1492 .unlocked_ioctl = drm_ioctl,
1493 .mmap = drm_gem_mmap,
1494 .poll = drm_poll,
1495 .read = drm_read,
1496 #ifdef CONFIG_COMPAT
1497 .compat_ioctl = i915_compat_ioctl,
1498 #endif
1499 .llseek = noop_llseek,
1500 };
1501
1502 static struct drm_driver driver = {
1503 /* Don't use MTRRs here; the Xserver or userspace app should
1504 * deal with them for Intel hardware.
1505 */
1506 .driver_features =
1507 DRIVER_USE_AGP |
1508 DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM | DRIVER_PRIME |
1509 DRIVER_RENDER,
1510 .load = i915_driver_load,
1511 .unload = i915_driver_unload,
1512 .open = i915_driver_open,
1513 .lastclose = i915_driver_lastclose,
1514 .preclose = i915_driver_preclose,
1515 .postclose = i915_driver_postclose,
1516
1517 /* Used in place of i915_pm_ops for non-DRIVER_MODESET */
1518 .suspend = i915_suspend,
1519 .resume = i915_resume_legacy,
1520
1521 .device_is_agp = i915_driver_device_is_agp,
1522 .master_create = i915_master_create,
1523 .master_destroy = i915_master_destroy,
1524 #if defined(CONFIG_DEBUG_FS)
1525 .debugfs_init = i915_debugfs_init,
1526 .debugfs_cleanup = i915_debugfs_cleanup,
1527 #endif
1528 .gem_free_object = i915_gem_free_object,
1529 .gem_vm_ops = &i915_gem_vm_ops,
1530
1531 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
1532 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
1533 .gem_prime_export = i915_gem_prime_export,
1534 .gem_prime_import = i915_gem_prime_import,
1535
1536 .dumb_create = i915_gem_dumb_create,
1537 .dumb_map_offset = i915_gem_mmap_gtt,
1538 .dumb_destroy = drm_gem_dumb_destroy,
1539 .ioctls = i915_ioctls,
1540 .fops = &i915_driver_fops,
1541 .name = DRIVER_NAME,
1542 .desc = DRIVER_DESC,
1543 .date = DRIVER_DATE,
1544 .major = DRIVER_MAJOR,
1545 .minor = DRIVER_MINOR,
1546 .patchlevel = DRIVER_PATCHLEVEL,
1547 };
1548
1549 static struct pci_driver i915_pci_driver = {
1550 .name = DRIVER_NAME,
1551 .id_table = pciidlist,
1552 .probe = i915_pci_probe,
1553 .remove = i915_pci_remove,
1554 .driver.pm = &i915_pm_ops,
1555 };
1556
1557 static int __init i915_init(void)
1558 {
1559 driver.num_ioctls = i915_max_ioctl;
1560
1561 /*
1562 * If CONFIG_DRM_I915_KMS is set, default to KMS unless
1563 * explicitly disabled with the module parameter.
1564 *
1565 * Otherwise, just follow the parameter (defaulting to off).
1566 *
1567 * Allow optional vga_text_mode_force boot option to override
1568 * the default behavior.
1569 */
1570 #if defined(CONFIG_DRM_I915_KMS)
1571 if (i915.modeset != 0)
1572 driver.driver_features |= DRIVER_MODESET;
1573 #endif
1574 if (i915.modeset == 1)
1575 driver.driver_features |= DRIVER_MODESET;
1576
1577 #ifdef CONFIG_VGA_CONSOLE
1578 if (vgacon_text_force() && i915.modeset == -1)
1579 driver.driver_features &= ~DRIVER_MODESET;
1580 #endif
1581
1582 if (!(driver.driver_features & DRIVER_MODESET)) {
1583 driver.get_vblank_timestamp = NULL;
1584 #ifndef CONFIG_DRM_I915_UMS
1585 /* Silently fail loading to not upset userspace. */
1586 return 0;
1587 #endif
1588 }
1589
1590 return drm_pci_init(&driver, &i915_pci_driver);
1591 }
1592
1593 static void __exit i915_exit(void)
1594 {
1595 #ifndef CONFIG_DRM_I915_UMS
1596 if (!(driver.driver_features & DRIVER_MODESET))
1597 return; /* Never loaded a driver. */
1598 #endif
1599
1600 drm_pci_exit(&driver, &i915_pci_driver);
1601 }
1602
1603 module_init(i915_init);
1604 module_exit(i915_exit);
1605
1606 MODULE_AUTHOR(DRIVER_AUTHOR);
1607 MODULE_DESCRIPTION(DRIVER_DESC);
1608 MODULE_LICENSE("GPL and additional rights");