Merge remote-tracking branch 'airlied/drm-next' into HEAD
[deliverable/linux.git] / drivers/gpu/drm/i915/i915_irq.c
1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2 */
3 /*
4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
17 * of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 */
28
29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
31 #include <linux/sysrq.h>
32 #include <linux/slab.h>
33 #include <linux/circ_buf.h>
34 #include <drm/drmP.h>
35 #include <drm/i915_drm.h>
36 #include "i915_drv.h"
37 #include "i915_trace.h"
38 #include "intel_drv.h"
39
40 /**
41 * DOC: interrupt handling
42 *
43 * These functions provide the basic support for enabling and disabling the
44 * interrupt handling support. There's a lot more functionality in i915_irq.c
45 * and related files, but that will be described in separate chapters.
46 */
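/*
 * Illustrative sketch (not part of the original file): the enable/disable
 * helpers below expect dev_priv->irq_lock to be held, e.g. a vblank enable
 * path looks roughly like:
 *
 *	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 *	ironlake_enable_display_irq(dev_priv, DE_PIPE_VBLANK(pipe));
 *	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 */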
47
48 static const u32 hpd_ibx[] = {
49 [HPD_CRT] = SDE_CRT_HOTPLUG,
50 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
51 [HPD_PORT_B] = SDE_PORTB_HOTPLUG,
52 [HPD_PORT_C] = SDE_PORTC_HOTPLUG,
53 [HPD_PORT_D] = SDE_PORTD_HOTPLUG
54 };
55
56 static const u32 hpd_cpt[] = {
57 [HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
58 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
59 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
60 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
61 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
62 };
63
64 static const u32 hpd_mask_i915[] = {
65 [HPD_CRT] = CRT_HOTPLUG_INT_EN,
66 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
67 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
68 [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
69 [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
70 [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
71 };
72
73 static const u32 hpd_status_g4x[] = {
74 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
75 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
76 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
77 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
78 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
79 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
80 };
81
82 static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
83 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
84 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
85 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
86 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
87 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
88 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
89 };
90
91 /* IIR can theoretically queue up two events. Be paranoid. */
92 #define GEN8_IRQ_RESET_NDX(type, which) do { \
93 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
94 POSTING_READ(GEN8_##type##_IMR(which)); \
95 I915_WRITE(GEN8_##type##_IER(which), 0); \
96 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
97 POSTING_READ(GEN8_##type##_IIR(which)); \
98 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
99 POSTING_READ(GEN8_##type##_IIR(which)); \
100 } while (0)
101
102 #define GEN5_IRQ_RESET(type) do { \
103 I915_WRITE(type##IMR, 0xffffffff); \
104 POSTING_READ(type##IMR); \
105 I915_WRITE(type##IER, 0); \
106 I915_WRITE(type##IIR, 0xffffffff); \
107 POSTING_READ(type##IIR); \
108 I915_WRITE(type##IIR, 0xffffffff); \
109 POSTING_READ(type##IIR); \
110 } while (0)
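/*
 * Illustrative expansion (sketch only): GEN5_IRQ_RESET(GT) expands to
 * writes of GTIMR, GTIER and GTIIR, with IIR written twice because it
 * can queue up two events:
 *
 *	I915_WRITE(GTIMR, 0xffffffff);
 *	POSTING_READ(GTIMR);
 *	I915_WRITE(GTIER, 0);
 *	I915_WRITE(GTIIR, 0xffffffff);
 *	POSTING_READ(GTIIR);
 *	I915_WRITE(GTIIR, 0xffffffff);
 *	POSTING_READ(GTIIR);
 */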
111
112 /*
113 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
114 */
115 #define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
116 u32 val = I915_READ(reg); \
117 if (val) { \
118 WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
119 (reg), val); \
120 I915_WRITE((reg), 0xffffffff); \
121 POSTING_READ(reg); \
122 I915_WRITE((reg), 0xffffffff); \
123 POSTING_READ(reg); \
124 } \
125 } while (0)
126
127 #define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
128 GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
129 I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
130 I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
131 POSTING_READ(GEN8_##type##_IMR(which)); \
132 } while (0)
133
134 #define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
135 GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
136 I915_WRITE(type##IER, (ier_val)); \
137 I915_WRITE(type##IMR, (imr_val)); \
138 POSTING_READ(type##IMR); \
139 } while (0)
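/*
 * Typical use (illustrative sketch, names hypothetical): postinstall code
 * pairs an IMR value with the IER bits it wants enabled, e.g.
 *
 *	GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
 *
 * which asserts GTIIR is already clear, then programs GTIER and GTIMR.
 */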
140
141 /* For display hotplug interrupt */
142 void
143 ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
144 {
145 assert_spin_locked(&dev_priv->irq_lock);
146
147 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
148 return;
149
150 if ((dev_priv->irq_mask & mask) != 0) {
151 dev_priv->irq_mask &= ~mask;
152 I915_WRITE(DEIMR, dev_priv->irq_mask);
153 POSTING_READ(DEIMR);
154 }
155 }
156
157 void
158 ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
159 {
160 assert_spin_locked(&dev_priv->irq_lock);
161
162 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
163 return;
164
165 if ((dev_priv->irq_mask & mask) != mask) {
166 dev_priv->irq_mask |= mask;
167 I915_WRITE(DEIMR, dev_priv->irq_mask);
168 POSTING_READ(DEIMR);
169 }
170 }
171
172 /**
173 * ilk_update_gt_irq - update GTIMR
174 * @dev_priv: driver private
175 * @interrupt_mask: mask of interrupt bits to update
176 * @enabled_irq_mask: mask of interrupt bits to enable
177 */
178 static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
179 uint32_t interrupt_mask,
180 uint32_t enabled_irq_mask)
181 {
182 assert_spin_locked(&dev_priv->irq_lock);
183
184 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
185 return;
186
187 dev_priv->gt_irq_mask &= ~interrupt_mask;
188 dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
189 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
190 POSTING_READ(GTIMR);
191 }
192
193 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
194 {
195 ilk_update_gt_irq(dev_priv, mask, mask);
196 }
197
198 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
199 {
200 ilk_update_gt_irq(dev_priv, mask, 0);
201 }
202
203 /**
204 * snb_update_pm_irq - update GEN6_PMIMR
205 * @dev_priv: driver private
206 * @interrupt_mask: mask of interrupt bits to update
207 * @enabled_irq_mask: mask of interrupt bits to enable
208 */
209 static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
210 uint32_t interrupt_mask,
211 uint32_t enabled_irq_mask)
212 {
213 uint32_t new_val;
214
215 assert_spin_locked(&dev_priv->irq_lock);
216
217 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
218 return;
219
220 new_val = dev_priv->pm_irq_mask;
221 new_val &= ~interrupt_mask;
222 new_val |= (~enabled_irq_mask & interrupt_mask);
223
224 if (new_val != dev_priv->pm_irq_mask) {
225 dev_priv->pm_irq_mask = new_val;
226 I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
227 POSTING_READ(GEN6_PMIMR);
228 }
229 }
230
231 void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
232 {
233 snb_update_pm_irq(dev_priv, mask, mask);
234 }
235
236 void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
237 {
238 snb_update_pm_irq(dev_priv, mask, 0);
239 }
240
241 /**
242 * bdw_update_pm_irq - update GT interrupt 2
243 * @dev_priv: driver private
244 * @interrupt_mask: mask of interrupt bits to update
245 * @enabled_irq_mask: mask of interrupt bits to enable
246 *
247 * Copied from the snb function, updated with relevant register offsets
248 */
249 static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
250 uint32_t interrupt_mask,
251 uint32_t enabled_irq_mask)
252 {
253 uint32_t new_val;
254
255 assert_spin_locked(&dev_priv->irq_lock);
256
257 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
258 return;
259
260 new_val = dev_priv->pm_irq_mask;
261 new_val &= ~interrupt_mask;
262 new_val |= (~enabled_irq_mask & interrupt_mask);
263
264 if (new_val != dev_priv->pm_irq_mask) {
265 dev_priv->pm_irq_mask = new_val;
266 I915_WRITE(GEN8_GT_IMR(2), dev_priv->pm_irq_mask);
267 POSTING_READ(GEN8_GT_IMR(2));
268 }
269 }
270
271 void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
272 {
273 bdw_update_pm_irq(dev_priv, mask, mask);
274 }
275
276 void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
277 {
278 bdw_update_pm_irq(dev_priv, mask, 0);
279 }
280
281 /**
282 * ibx_display_interrupt_update - update SDEIMR
283 * @dev_priv: driver private
284 * @interrupt_mask: mask of interrupt bits to update
285 * @enabled_irq_mask: mask of interrupt bits to enable
286 */
287 void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
288 uint32_t interrupt_mask,
289 uint32_t enabled_irq_mask)
290 {
291 uint32_t sdeimr = I915_READ(SDEIMR);
292 sdeimr &= ~interrupt_mask;
293 sdeimr |= (~enabled_irq_mask & interrupt_mask);
294
295 assert_spin_locked(&dev_priv->irq_lock);
296
297 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
298 return;
299
300 I915_WRITE(SDEIMR, sdeimr);
301 POSTING_READ(SDEIMR);
302 }
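/*
 * Illustrative sketch of the two-mask convention used by the update
 * helpers above: passing the same value for both masks unmasks (enables)
 * the bits, while passing 0 as enabled_irq_mask masks (disables) them:
 *
 *	ibx_display_interrupt_update(dev_priv, bits, bits);	(enable)
 *	ibx_display_interrupt_update(dev_priv, bits, 0);	(disable)
 */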
303
304 static void
305 __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
306 u32 enable_mask, u32 status_mask)
307 {
308 u32 reg = PIPESTAT(pipe);
309 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
310
311 assert_spin_locked(&dev_priv->irq_lock);
312 WARN_ON(!intel_irqs_enabled(dev_priv));
313
314 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
315 status_mask & ~PIPESTAT_INT_STATUS_MASK,
316 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
317 pipe_name(pipe), enable_mask, status_mask))
318 return;
319
320 if ((pipestat & enable_mask) == enable_mask)
321 return;
322
323 dev_priv->pipestat_irq_mask[pipe] |= status_mask;
324
325 /* Enable the interrupt, clear any pending status */
326 pipestat |= enable_mask | status_mask;
327 I915_WRITE(reg, pipestat);
328 POSTING_READ(reg);
329 }
330
331 static void
332 __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
333 u32 enable_mask, u32 status_mask)
334 {
335 u32 reg = PIPESTAT(pipe);
336 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
337
338 assert_spin_locked(&dev_priv->irq_lock);
339 WARN_ON(!intel_irqs_enabled(dev_priv));
340
341 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
342 status_mask & ~PIPESTAT_INT_STATUS_MASK,
343 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
344 pipe_name(pipe), enable_mask, status_mask))
345 return;
346
347 if ((pipestat & enable_mask) == 0)
348 return;
349
350 dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
351
352 pipestat &= ~enable_mask;
353 I915_WRITE(reg, pipestat);
354 POSTING_READ(reg);
355 }
356
357 static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
358 {
359 u32 enable_mask = status_mask << 16;
360
361 /*
362 * On pipe A we don't support the PSR interrupt yet,
363 * on pipe B and C the same bit MBZ (must be zero).
364 */
365 if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
366 return 0;
367 /*
368 * On pipe B and C we don't support the PSR interrupt yet, on pipe
369 * A the same bit is for perf counters which we don't use either.
370 */
371 if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
372 return 0;
373
374 enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
375 SPRITE0_FLIP_DONE_INT_EN_VLV |
376 SPRITE1_FLIP_DONE_INT_EN_VLV);
377 if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
378 enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
379 if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
380 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
381
382 return enable_mask;
383 }
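/*
 * Illustrative note (sketch): for most PIPESTAT bits the enable bit is
 * simply the status bit shifted into the high half of the register, e.g.
 * PIPE_VBLANK_INTERRUPT_STATUS << 16 == PIPE_VBLANK_INTERRUPT_ENABLE;
 * the sprite flip done bits handled above are the exception and use
 * their dedicated *_INT_EN_VLV bits instead.
 */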
384
385 void
386 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
387 u32 status_mask)
388 {
389 u32 enable_mask;
390
391 if (IS_VALLEYVIEW(dev_priv->dev))
392 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
393 status_mask);
394 else
395 enable_mask = status_mask << 16;
396 __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
397 }
398
399 void
400 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
401 u32 status_mask)
402 {
403 u32 enable_mask;
404
405 if (IS_VALLEYVIEW(dev_priv->dev))
406 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
407 status_mask);
408 else
409 enable_mask = status_mask << 16;
410 __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
411 }
412
413 /**
414 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
415 */
416 static void i915_enable_asle_pipestat(struct drm_device *dev)
417 {
418 struct drm_i915_private *dev_priv = dev->dev_private;
419
420 if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
421 return;
422
423 spin_lock_irq(&dev_priv->irq_lock);
424
425 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
426 if (INTEL_INFO(dev)->gen >= 4)
427 i915_enable_pipestat(dev_priv, PIPE_A,
428 PIPE_LEGACY_BLC_EVENT_STATUS);
429
430 spin_unlock_irq(&dev_priv->irq_lock);
431 }
432
433 /**
434 * i915_pipe_enabled - check if a pipe is enabled
435 * @dev: DRM device
436 * @pipe: pipe to check
437 *
438 * Reading certain registers when the pipe is disabled can hang the chip.
439 * Use this routine to make sure the PLL is running and the pipe is active
440 * before reading such registers if unsure.
441 */
442 static int
443 i915_pipe_enabled(struct drm_device *dev, int pipe)
444 {
445 struct drm_i915_private *dev_priv = dev->dev_private;
446
447 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
448 /* Locking is horribly broken here, but whatever. */
449 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
450 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
451
452 return intel_crtc->active;
453 } else {
454 return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
455 }
456 }
457
458 /*
459 * This timing diagram depicts the video signal in and
460 * around the vertical blanking period.
461 *
462 * Assumptions about the fictitious mode used in this example:
463 * vblank_start >= 3
464 * vsync_start = vblank_start + 1
465 * vsync_end = vblank_start + 2
466 * vtotal = vblank_start + 3
467 *
468 * start of vblank:
469 * latch double buffered registers
470 * increment frame counter (ctg+)
471 * generate start of vblank interrupt (gen4+)
472 * |
473 * | frame start:
474 * | generate frame start interrupt (aka. vblank interrupt) (gmch)
475 * | may be shifted forward 1-3 extra lines via PIPECONF
476 * | |
477 * | | start of vsync:
478 * | | generate vsync interrupt
479 * | | |
480 * ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx
481 * . \hs/ . \hs/ \hs/ \hs/ . \hs/
482 * ----va---> <-----------------vb--------------------> <--------va-------------
483 * | | <----vs-----> |
484 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
485 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
486 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
487 * | | |
488 * last visible pixel first visible pixel
489 * | increment frame counter (gen3/4)
490 * pixel counter = vblank_start * htotal pixel counter = 0 (gen3/4)
491 *
492 * x = horizontal active
493 * _ = horizontal blanking
494 * hs = horizontal sync
495 * va = vertical active
496 * vb = vertical blanking
497 * vs = vertical sync
498 * vbs = vblank_start (number)
499 *
500 * Summary:
501 * - most events happen at the start of horizontal sync
502 * - frame start happens at the start of horizontal blank, 1-4 lines
503 * (depending on PIPECONF settings) after the start of vblank
504 * - gen3/4 pixel and frame counter are synchronized with the start
505 * of horizontal active on the first line of vertical active
506 */
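/*
 * Worked example (illustrative numbers only): with htotal = 100,
 * hsync_start = 80 and vbl_start = 50, the conversion done in
 * i915_get_vblank_counter() below yields
 *
 *	vbl_start = 50 * 100 = 5000;		(convert to pixels)
 *	vbl_start -= 100 - 80;			(back up to start of hsync)
 *
 * i.e. the vblank boundary used for the pixel counter check is 4980.
 */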
507
508 static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
509 {
510 /* Gen2 doesn't have a hardware frame counter */
511 return 0;
512 }
513
514 /* Called from drm generic code, passed a 'crtc', which
515 * we use as a pipe index
516 */
517 static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
518 {
519 struct drm_i915_private *dev_priv = dev->dev_private;
520 unsigned long high_frame;
521 unsigned long low_frame;
522 u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
523
524 if (!i915_pipe_enabled(dev, pipe)) {
525 DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
526 "pipe %c\n", pipe_name(pipe));
527 return 0;
528 }
529
530 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
531 struct intel_crtc *intel_crtc =
532 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
533 const struct drm_display_mode *mode =
534 &intel_crtc->config.adjusted_mode;
535
536 htotal = mode->crtc_htotal;
537 hsync_start = mode->crtc_hsync_start;
538 vbl_start = mode->crtc_vblank_start;
539 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
540 vbl_start = DIV_ROUND_UP(vbl_start, 2);
541 } else {
542 enum transcoder cpu_transcoder = (enum transcoder) pipe;
543
544 htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
545 hsync_start = (I915_READ(HSYNC(cpu_transcoder)) & 0x1fff) + 1;
546 vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
547 if ((I915_READ(PIPECONF(cpu_transcoder)) &
548 PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE)
549 vbl_start = DIV_ROUND_UP(vbl_start, 2);
550 }
551
552 /* Convert to pixel count */
553 vbl_start *= htotal;
554
555 /* Start of vblank event occurs at start of hsync */
556 vbl_start -= htotal - hsync_start;
557
558 high_frame = PIPEFRAME(pipe);
559 low_frame = PIPEFRAMEPIXEL(pipe);
560
561 /*
562 * High & low register fields aren't synchronized, so make sure
563 * we get a low value that's stable across two reads of the high
564 * register.
565 */
566 do {
567 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
568 low = I915_READ(low_frame);
569 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
570 } while (high1 != high2);
571
572 high1 >>= PIPE_FRAME_HIGH_SHIFT;
573 pixel = low & PIPE_PIXEL_MASK;
574 low >>= PIPE_FRAME_LOW_SHIFT;
575
576 /*
577 * The frame counter increments at beginning of active.
578 * Cook up a vblank counter by also checking the pixel
579 * counter against vblank start.
580 */
581 return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
582 }
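/*
 * Illustrative example of the counter assembly above: with high1 = 0x0102,
 * low = 0x34 and the pixel counter already past vbl_start, the function
 * returns ((0x0102 << 8) | 0x34) + 1 = 0x010235, truncated to 24 bits.
 */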
583
584 static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
585 {
586 struct drm_i915_private *dev_priv = dev->dev_private;
587 int reg = PIPE_FRMCOUNT_GM45(pipe);
588
589 if (!i915_pipe_enabled(dev, pipe)) {
590 DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
591 "pipe %c\n", pipe_name(pipe));
592 return 0;
593 }
594
595 return I915_READ(reg);
596 }
597
598 /* raw reads, only for fast reads of display block, no need for forcewake etc. */
599 #define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
600
601 static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
602 {
603 struct drm_device *dev = crtc->base.dev;
604 struct drm_i915_private *dev_priv = dev->dev_private;
605 const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
606 enum pipe pipe = crtc->pipe;
607 int position, vtotal;
608
609 vtotal = mode->crtc_vtotal;
610 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
611 vtotal /= 2;
612
613 if (IS_GEN2(dev))
614 position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
615 else
616 position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
617
618 /*
619 * See update_scanline_offset() for the details on the
620 * scanline_offset adjustment.
621 */
622 return (position + crtc->scanline_offset) % vtotal;
623 }
624
625 static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
626 unsigned int flags, int *vpos, int *hpos,
627 ktime_t *stime, ktime_t *etime)
628 {
629 struct drm_i915_private *dev_priv = dev->dev_private;
630 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
631 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
632 const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
633 int position;
634 int vbl_start, vbl_end, hsync_start, htotal, vtotal;
635 bool in_vbl = true;
636 int ret = 0;
637 unsigned long irqflags;
638
639 if (!intel_crtc->active) {
640 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
641 "pipe %c\n", pipe_name(pipe));
642 return 0;
643 }
644
645 htotal = mode->crtc_htotal;
646 hsync_start = mode->crtc_hsync_start;
647 vtotal = mode->crtc_vtotal;
648 vbl_start = mode->crtc_vblank_start;
649 vbl_end = mode->crtc_vblank_end;
650
651 if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
652 vbl_start = DIV_ROUND_UP(vbl_start, 2);
653 vbl_end /= 2;
654 vtotal /= 2;
655 }
656
657 ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
658
659 /*
660 * Lock uncore.lock, as we will do multiple timing critical raw
661 * register reads, potentially with preemption disabled, so the
662 * following code must not block on uncore.lock.
663 */
664 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
665
666 /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
667
668 /* Get optional system timestamp before query. */
669 if (stime)
670 *stime = ktime_get();
671
672 if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
673 /* No obvious pixelcount register. Only query vertical
674 * scanout position from Display scan line register.
675 */
676 position = __intel_get_crtc_scanline(intel_crtc);
677 } else {
678 /* Have access to pixelcount since start of frame.
679 * We can split this into vertical and horizontal
680 * scanout position.
681 */
682 position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
683
684 /* convert to pixel counts */
685 vbl_start *= htotal;
686 vbl_end *= htotal;
687 vtotal *= htotal;
688
689 /*
690 * In interlaced modes, the pixel counter counts all pixels,
691 * so one field will have htotal more pixels. In order to avoid
692 * the reported position from jumping backwards when the pixel
693 * counter is beyond the length of the shorter field, just
694 * clamp the position to the length of the shorter field. This
695 * matches how the scanline counter based position works since
696 * the scanline counter doesn't count the two half lines.
697 */
698 if (position >= vtotal)
699 position = vtotal - 1;
700
701 /*
702 * Start of vblank interrupt is triggered at start of hsync,
703 * just prior to the first active line of vblank. However we
704 * consider lines to start at the leading edge of horizontal
705 * active. So, should we get here before we've crossed into
706 * the horizontal active of the first line in vblank, we would
707 * not set the DRM_SCANOUTPOS_IN_VBLANK flag. In order to fix that,
708 * always add htotal-hsync_start to the current pixel position.
709 */
710 position = (position + htotal - hsync_start) % vtotal;
711 }
712
713 /* Get optional system timestamp after query. */
714 if (etime)
715 *etime = ktime_get();
716
717 /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
718
719 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
720
721 in_vbl = position >= vbl_start && position < vbl_end;
722
723 /*
724 * While in vblank, position will be negative
725 * counting up towards 0 at vbl_end. And outside
726 * vblank, position will be positive counting
727 * up since vbl_end.
728 */
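/*
 * Worked example (arbitrary illustrative numbers): with vbl_start = 50,
 * vbl_end = 53 and vtotal = 55, a position of 51 (inside vblank) becomes
 * 51 - 53 = -2, while a position of 10 (outside vblank) becomes
 * 10 + (55 - 53) = 12.
 */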
729 if (position >= vbl_start)
730 position -= vbl_end;
731 else
732 position += vtotal - vbl_end;
733
734 if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
735 *vpos = position;
736 *hpos = 0;
737 } else {
738 *vpos = position / htotal;
739 *hpos = position - (*vpos * htotal);
740 }
741
742 /* In vblank? */
743 if (in_vbl)
744 ret |= DRM_SCANOUTPOS_IN_VBLANK;
745
746 return ret;
747 }
748
749 int intel_get_crtc_scanline(struct intel_crtc *crtc)
750 {
751 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
752 unsigned long irqflags;
753 int position;
754
755 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
756 position = __intel_get_crtc_scanline(crtc);
757 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
758
759 return position;
760 }
761
762 static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
763 int *max_error,
764 struct timeval *vblank_time,
765 unsigned flags)
766 {
767 struct drm_crtc *crtc;
768
769 if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
770 DRM_ERROR("Invalid crtc %d\n", pipe);
771 return -EINVAL;
772 }
773
774 /* Get drm_crtc to timestamp: */
775 crtc = intel_get_crtc_for_pipe(dev, pipe);
776 if (crtc == NULL) {
777 DRM_ERROR("Invalid crtc %d\n", pipe);
778 return -EINVAL;
779 }
780
781 if (!crtc->enabled) {
782 DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
783 return -EBUSY;
784 }
785
786 /* Helper routine in DRM core does all the work: */
787 return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
788 vblank_time, flags,
789 crtc,
790 &to_intel_crtc(crtc)->config.adjusted_mode);
791 }
792
793 static bool intel_hpd_irq_event(struct drm_device *dev,
794 struct drm_connector *connector)
795 {
796 enum drm_connector_status old_status;
797
798 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
799 old_status = connector->status;
800
801 connector->status = connector->funcs->detect(connector, false);
802 if (old_status == connector->status)
803 return false;
804
805 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
806 connector->base.id,
807 connector->name,
808 drm_get_connector_status_name(old_status),
809 drm_get_connector_status_name(connector->status));
810
811 return true;
812 }
813
814 static void i915_digport_work_func(struct work_struct *work)
815 {
816 struct drm_i915_private *dev_priv =
817 container_of(work, struct drm_i915_private, dig_port_work);
818 u32 long_port_mask, short_port_mask;
819 struct intel_digital_port *intel_dig_port;
820 int i, ret;
821 u32 old_bits = 0;
822
823 spin_lock_irq(&dev_priv->irq_lock);
824 long_port_mask = dev_priv->long_hpd_port_mask;
825 dev_priv->long_hpd_port_mask = 0;
826 short_port_mask = dev_priv->short_hpd_port_mask;
827 dev_priv->short_hpd_port_mask = 0;
828 spin_unlock_irq(&dev_priv->irq_lock);
829
830 for (i = 0; i < I915_MAX_PORTS; i++) {
831 bool valid = false;
832 bool long_hpd = false;
833 intel_dig_port = dev_priv->hpd_irq_port[i];
834 if (!intel_dig_port || !intel_dig_port->hpd_pulse)
835 continue;
836
837 if (long_port_mask & (1 << i)) {
838 valid = true;
839 long_hpd = true;
840 } else if (short_port_mask & (1 << i))
841 valid = true;
842
843 if (valid) {
844 ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
845 if (ret == true) {
846 /* if we get true fallback to old school hpd */
847 old_bits |= (1 << intel_dig_port->base.hpd_pin);
848 }
849 }
850 }
851
852 if (old_bits) {
853 spin_lock_irq(&dev_priv->irq_lock);
854 dev_priv->hpd_event_bits |= old_bits;
855 spin_unlock_irq(&dev_priv->irq_lock);
856 schedule_work(&dev_priv->hotplug_work);
857 }
858 }
859
860 /*
861 * Handle hotplug events outside the interrupt handler proper.
862 */
863 #define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)
864
865 static void i915_hotplug_work_func(struct work_struct *work)
866 {
867 struct drm_i915_private *dev_priv =
868 container_of(work, struct drm_i915_private, hotplug_work);
869 struct drm_device *dev = dev_priv->dev;
870 struct drm_mode_config *mode_config = &dev->mode_config;
871 struct intel_connector *intel_connector;
872 struct intel_encoder *intel_encoder;
873 struct drm_connector *connector;
874 bool hpd_disabled = false;
875 bool changed = false;
876 u32 hpd_event_bits;
877
878 mutex_lock(&mode_config->mutex);
879 DRM_DEBUG_KMS("running encoder hotplug functions\n");
880
881 spin_lock_irq(&dev_priv->irq_lock);
882
883 hpd_event_bits = dev_priv->hpd_event_bits;
884 dev_priv->hpd_event_bits = 0;
885 list_for_each_entry(connector, &mode_config->connector_list, head) {
886 intel_connector = to_intel_connector(connector);
887 if (!intel_connector->encoder)
888 continue;
889 intel_encoder = intel_connector->encoder;
890 if (intel_encoder->hpd_pin > HPD_NONE &&
891 dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
892 connector->polled == DRM_CONNECTOR_POLL_HPD) {
893 DRM_INFO("HPD interrupt storm detected on connector %s: "
894 "switching from hotplug detection to polling\n",
895 connector->name);
896 dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
897 connector->polled = DRM_CONNECTOR_POLL_CONNECT
898 | DRM_CONNECTOR_POLL_DISCONNECT;
899 hpd_disabled = true;
900 }
901 if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
902 DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
903 connector->name, intel_encoder->hpd_pin);
904 }
905 }
906 /* If there were no outputs to poll, polling was disabled,
907 * therefore make sure it's re-enabled when we disable HPD on
908 * some connectors. */
909 if (hpd_disabled) {
910 drm_kms_helper_poll_enable(dev);
911 mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work,
912 msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
913 }
914
915 spin_unlock_irq(&dev_priv->irq_lock);
916
917 list_for_each_entry(connector, &mode_config->connector_list, head) {
918 intel_connector = to_intel_connector(connector);
919 if (!intel_connector->encoder)
920 continue;
921 intel_encoder = intel_connector->encoder;
922 if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
923 if (intel_encoder->hot_plug)
924 intel_encoder->hot_plug(intel_encoder);
925 if (intel_hpd_irq_event(dev, connector))
926 changed = true;
927 }
928 }
929 mutex_unlock(&mode_config->mutex);
930
931 if (changed)
932 drm_kms_helper_hotplug_event(dev);
933 }
934
935 static void ironlake_rps_change_irq_handler(struct drm_device *dev)
936 {
937 struct drm_i915_private *dev_priv = dev->dev_private;
938 u32 busy_up, busy_down, max_avg, min_avg;
939 u8 new_delay;
940
941 spin_lock(&mchdev_lock);
942
943 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
944
945 new_delay = dev_priv->ips.cur_delay;
946
947 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
948 busy_up = I915_READ(RCPREVBSYTUPAVG);
949 busy_down = I915_READ(RCPREVBSYTDNAVG);
950 max_avg = I915_READ(RCBMAXAVG);
951 min_avg = I915_READ(RCBMINAVG);
952
953 /* Handle RCS change request from hw */
954 if (busy_up > max_avg) {
955 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
956 new_delay = dev_priv->ips.cur_delay - 1;
957 if (new_delay < dev_priv->ips.max_delay)
958 new_delay = dev_priv->ips.max_delay;
959 } else if (busy_down < min_avg) {
960 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
961 new_delay = dev_priv->ips.cur_delay + 1;
962 if (new_delay > dev_priv->ips.min_delay)
963 new_delay = dev_priv->ips.min_delay;
964 }
965
966 if (ironlake_set_drps(dev, new_delay))
967 dev_priv->ips.cur_delay = new_delay;
968
969 spin_unlock(&mchdev_lock);
970
971 return;
972 }
973
974 static void notify_ring(struct drm_device *dev,
975 struct intel_engine_cs *ring)
976 {
977 if (!intel_ring_initialized(ring))
978 return;
979
980 trace_i915_gem_request_complete(ring);
981
982 wake_up_all(&ring->irq_queue);
983 i915_queue_hangcheck(dev);
984 }
985
986 static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
987 struct intel_rps_ei *rps_ei)
988 {
989 u32 cz_ts, cz_freq_khz;
990 u32 render_count, media_count;
991 u32 elapsed_render, elapsed_media, elapsed_time;
992 u32 residency = 0;
993
994 cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
995 cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4);
996
997 render_count = I915_READ(VLV_RENDER_C0_COUNT_REG);
998 media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG);
999
1000 if (rps_ei->cz_clock == 0) {
1001 rps_ei->cz_clock = cz_ts;
1002 rps_ei->render_c0 = render_count;
1003 rps_ei->media_c0 = media_count;
1004
1005 return dev_priv->rps.cur_freq;
1006 }
1007
1008 elapsed_time = cz_ts - rps_ei->cz_clock;
1009 rps_ei->cz_clock = cz_ts;
1010
1011 elapsed_render = render_count - rps_ei->render_c0;
1012 rps_ei->render_c0 = render_count;
1013
1014 elapsed_media = media_count - rps_ei->media_c0;
1015 rps_ei->media_c0 = media_count;
1016
1017 /* Convert all the counters into a common unit of milliseconds */
1018 elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC;
1019 elapsed_render /= cz_freq_khz;
1020 elapsed_media /= cz_freq_khz;
1021
1022 /*
1023 * Calculate overall C0 residency percentage
1024 * only if elapsed time is non zero
1025 */
1026 if (elapsed_time) {
1027 residency =
1028 ((max(elapsed_render, elapsed_media) * 100)
1029 / elapsed_time);
1030 }
1031
1032 return residency;
1033 }
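/*
 * Worked example (illustrative numbers): after the unit conversion above,
 * elapsed_time = 10, elapsed_render = 6 and elapsed_media = 4 give a C0
 * residency of max(6, 4) * 100 / 10 = 60 percent.
 */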
1034
1035 /**
1036 * vlv_calc_delay_from_C0_counters - Increase/Decrease freq based on GPU
1037 * busy-ness calculated from C0 counters of render & media power wells
1038 * @dev_priv: DRM device private
1039 *
1040 */
1041 static int vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
1042 {
1043 u32 residency_C0_up = 0, residency_C0_down = 0;
1044 int new_delay, adj;
1045
1046 dev_priv->rps.ei_interrupt_count++;
1047
1048 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
1049
1050
1051 if (dev_priv->rps.up_ei.cz_clock == 0) {
1052 vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei);
1053 vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei);
1054 return dev_priv->rps.cur_freq;
1055 }
1056
1057
1058 /*
1059 * To down throttle, C0 residency should be less than down threshold
1060 * for continuous EI intervals. So calculate down EI counters
1061 * once in VLV_INT_COUNT_FOR_DOWN_EI
1062 */
1063 if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) {
1064
1065 dev_priv->rps.ei_interrupt_count = 0;
1066
1067 residency_C0_down = vlv_c0_residency(dev_priv,
1068 &dev_priv->rps.down_ei);
1069 } else {
1070 residency_C0_up = vlv_c0_residency(dev_priv,
1071 &dev_priv->rps.up_ei);
1072 }
1073
1074 new_delay = dev_priv->rps.cur_freq;
1075
1076 adj = dev_priv->rps.last_adj;
1077 /* C0 residency is greater than UP threshold. Increase Frequency */
1078 if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) {
1079 if (adj > 0)
1080 adj *= 2;
1081 else
1082 adj = 1;
1083
1084 if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)
1085 new_delay = dev_priv->rps.cur_freq + adj;
1086
1087 /*
1088 * For better performance, jump directly
1089 * to RPe if we're below it.
1090 */
1091 if (new_delay < dev_priv->rps.efficient_freq)
1092 new_delay = dev_priv->rps.efficient_freq;
1093
1094 } else if (!dev_priv->rps.ei_interrupt_count &&
1095 (residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) {
1096 if (adj < 0)
1097 adj *= 2;
1098 else
1099 adj = -1;
1100 /*
1101 * This means C0 residency has been below the down threshold over
1102 * a period of VLV_INT_COUNT_FOR_DOWN_EI, so reduce the freq.
1103 */
1104 if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
1105 new_delay = dev_priv->rps.cur_freq + adj;
1106 }
1107
1108 return new_delay;
1109 }
1110
1111 static void gen6_pm_rps_work(struct work_struct *work)
1112 {
1113 struct drm_i915_private *dev_priv =
1114 container_of(work, struct drm_i915_private, rps.work);
1115 u32 pm_iir;
1116 int new_delay, adj;
1117
1118 spin_lock_irq(&dev_priv->irq_lock);
1119 pm_iir = dev_priv->rps.pm_iir;
1120 dev_priv->rps.pm_iir = 0;
1121 if (INTEL_INFO(dev_priv->dev)->gen >= 8)
1122 gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
1123 else {
1124 /* Make sure not to corrupt PMIMR state used by ringbuffer */
1125 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
1126 }
1127 spin_unlock_irq(&dev_priv->irq_lock);
1128
1129 /* Make sure we didn't queue anything we're not going to process. */
1130 WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
1131
1132 if ((pm_iir & dev_priv->pm_rps_events) == 0)
1133 return;
1134
1135 mutex_lock(&dev_priv->rps.hw_lock);
1136
1137 adj = dev_priv->rps.last_adj;
1138 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
1139 if (adj > 0)
1140 adj *= 2;
1141 else {
1142 /* CHV needs even encode values */
1143 adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1;
1144 }
1145 new_delay = dev_priv->rps.cur_freq + adj;
1146
1147 /*
1148 * For better performance, jump directly
1149 * to RPe if we're below it.
1150 */
1151 if (new_delay < dev_priv->rps.efficient_freq)
1152 new_delay = dev_priv->rps.efficient_freq;
1153 } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
1154 if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
1155 new_delay = dev_priv->rps.efficient_freq;
1156 else
1157 new_delay = dev_priv->rps.min_freq_softlimit;
1158 adj = 0;
1159 } else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
1160 new_delay = vlv_calc_delay_from_C0_counters(dev_priv);
1161 } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
1162 if (adj < 0)
1163 adj *= 2;
1164 else {
1165 /* CHV needs even encode values */
1166 adj = IS_CHERRYVIEW(dev_priv->dev) ? -2 : -1;
1167 }
1168 new_delay = dev_priv->rps.cur_freq + adj;
1169 } else { /* unknown event */
1170 new_delay = dev_priv->rps.cur_freq;
1171 }
1172
1173 /* sysfs frequency interfaces may have snuck in while servicing the
1174 * interrupt
1175 */
1176 new_delay = clamp_t(int, new_delay,
1177 dev_priv->rps.min_freq_softlimit,
1178 dev_priv->rps.max_freq_softlimit);
1179
1180 dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;
1181
1182 if (IS_VALLEYVIEW(dev_priv->dev))
1183 valleyview_set_rps(dev_priv->dev, new_delay);
1184 else
1185 gen6_set_rps(dev_priv->dev, new_delay);
1186
1187 mutex_unlock(&dev_priv->rps.hw_lock);
1188 }
1189
1190
1191 /**
1192 * ivybridge_parity_work - Workqueue called when a parity error interrupt
1193 * occurred.
1194 * @work: workqueue struct
1195 *
1196 * Doesn't actually do anything except notify userspace. As a consequence of
1197 * this event, userspace should try to remap the bad rows since,
1198 * statistically, the same row is more likely to go bad again.
1199 */
1200 static void ivybridge_parity_work(struct work_struct *work)
1201 {
1202 struct drm_i915_private *dev_priv =
1203 container_of(work, struct drm_i915_private, l3_parity.error_work);
1204 u32 error_status, row, bank, subbank;
1205 char *parity_event[6];
1206 uint32_t misccpctl;
1207 uint8_t slice = 0;
1208
1209 /* We must turn off DOP level clock gating to access the L3 registers.
1210 * In order to prevent a get/put style interface, acquire struct mutex
1211 * any time we access those registers.
1212 */
1213 mutex_lock(&dev_priv->dev->struct_mutex);
1214
1215 /* If we've screwed up tracking, just let the interrupt fire again */
1216 if (WARN_ON(!dev_priv->l3_parity.which_slice))
1217 goto out;
1218
1219 misccpctl = I915_READ(GEN7_MISCCPCTL);
1220 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
1221 POSTING_READ(GEN7_MISCCPCTL);
1222
1223 while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1224 u32 reg;
1225
1226 slice--;
1227 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
1228 break;
1229
1230 dev_priv->l3_parity.which_slice &= ~(1<<slice);
1231
1232 reg = GEN7_L3CDERRST1 + (slice * 0x200);
1233
1234 error_status = I915_READ(reg);
1235 row = GEN7_PARITY_ERROR_ROW(error_status);
1236 bank = GEN7_PARITY_ERROR_BANK(error_status);
1237 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
1238
1239 I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
1240 POSTING_READ(reg);
1241
1242 parity_event[0] = I915_L3_PARITY_UEVENT "=1";
1243 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
1244 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
1245 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
1246 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
1247 parity_event[5] = NULL;
1248
1249 kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
1250 KOBJ_CHANGE, parity_event);
1251
1252 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
1253 slice, row, bank, subbank);
1254
1255 kfree(parity_event[4]);
1256 kfree(parity_event[3]);
1257 kfree(parity_event[2]);
1258 kfree(parity_event[1]);
1259 }
1260
1261 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
1262
1263 out:
1264 WARN_ON(dev_priv->l3_parity.which_slice);
1265 spin_lock_irq(&dev_priv->irq_lock);
1266 gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
1267 spin_unlock_irq(&dev_priv->irq_lock);
1268
1269 mutex_unlock(&dev_priv->dev->struct_mutex);
1270 }
1271
1272 static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
1273 {
1274 struct drm_i915_private *dev_priv = dev->dev_private;
1275
1276 if (!HAS_L3_DPF(dev))
1277 return;
1278
1279 spin_lock(&dev_priv->irq_lock);
1280 gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
1281 spin_unlock(&dev_priv->irq_lock);
1282
1283 iir &= GT_PARITY_ERROR(dev);
1284 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
1285 dev_priv->l3_parity.which_slice |= 1 << 1;
1286
1287 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
1288 dev_priv->l3_parity.which_slice |= 1 << 0;
1289
1290 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
1291 }
1292
1293 static void ilk_gt_irq_handler(struct drm_device *dev,
1294 struct drm_i915_private *dev_priv,
1295 u32 gt_iir)
1296 {
1297 if (gt_iir &
1298 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1299 notify_ring(dev, &dev_priv->ring[RCS]);
1300 if (gt_iir & ILK_BSD_USER_INTERRUPT)
1301 notify_ring(dev, &dev_priv->ring[VCS]);
1302 }
1303
1304 static void snb_gt_irq_handler(struct drm_device *dev,
1305 struct drm_i915_private *dev_priv,
1306 u32 gt_iir)
1307 {
1308
1309 if (gt_iir &
1310 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1311 notify_ring(dev, &dev_priv->ring[RCS]);
1312 if (gt_iir & GT_BSD_USER_INTERRUPT)
1313 notify_ring(dev, &dev_priv->ring[VCS]);
1314 if (gt_iir & GT_BLT_USER_INTERRUPT)
1315 notify_ring(dev, &dev_priv->ring[BCS]);
1316
1317 if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
1318 GT_BSD_CS_ERROR_INTERRUPT |
1319 GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
1320 i915_handle_error(dev, false, "GT error interrupt 0x%08x",
1321 gt_iir);
1322 }
1323
1324 if (gt_iir & GT_PARITY_ERROR(dev))
1325 ivybridge_parity_error_irq_handler(dev, gt_iir);
1326 }
1327
1328 static void gen8_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1329 {
1330 if ((pm_iir & dev_priv->pm_rps_events) == 0)
1331 return;
1332
1333 spin_lock(&dev_priv->irq_lock);
1334 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
1335 gen8_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1336 spin_unlock(&dev_priv->irq_lock);
1337
1338 queue_work(dev_priv->wq, &dev_priv->rps.work);
1339 }
1340
1341 static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
1342 struct drm_i915_private *dev_priv,
1343 u32 master_ctl)
1344 {
1345 struct intel_engine_cs *ring;
1346 u32 rcs, bcs, vcs;
1347 uint32_t tmp = 0;
1348 irqreturn_t ret = IRQ_NONE;
1349
1350 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1351 tmp = I915_READ(GEN8_GT_IIR(0));
1352 if (tmp) {
1353 I915_WRITE(GEN8_GT_IIR(0), tmp);
1354 ret = IRQ_HANDLED;
1355
1356 rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
1357 ring = &dev_priv->ring[RCS];
1358 if (rcs & GT_RENDER_USER_INTERRUPT)
1359 notify_ring(dev, ring);
1360 if (rcs & GT_CONTEXT_SWITCH_INTERRUPT)
1361 intel_execlists_handle_ctx_events(ring);
1362
1363 bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
1364 ring = &dev_priv->ring[BCS];
1365 if (bcs & GT_RENDER_USER_INTERRUPT)
1366 notify_ring(dev, ring);
1367 if (bcs & GT_CONTEXT_SWITCH_INTERRUPT)
1368 intel_execlists_handle_ctx_events(ring);
1369 } else
1370 DRM_ERROR("The master control interrupt lied (GT0)!\n");
1371 }
1372
1373 if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
1374 tmp = I915_READ(GEN8_GT_IIR(1));
1375 if (tmp) {
1376 I915_WRITE(GEN8_GT_IIR(1), tmp);
1377 ret = IRQ_HANDLED;
1378
1379 vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
1380 ring = &dev_priv->ring[VCS];
1381 if (vcs & GT_RENDER_USER_INTERRUPT)
1382 notify_ring(dev, ring);
1383 if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
1384 intel_execlists_handle_ctx_events(ring);
1385
1386 vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
1387 ring = &dev_priv->ring[VCS2];
1388 if (vcs & GT_RENDER_USER_INTERRUPT)
1389 notify_ring(dev, ring);
1390 if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
1391 intel_execlists_handle_ctx_events(ring);
1392 } else
1393 DRM_ERROR("The master control interrupt lied (GT1)!\n");
1394 }
1395
1396 if (master_ctl & GEN8_GT_PM_IRQ) {
1397 tmp = I915_READ(GEN8_GT_IIR(2));
1398 if (tmp & dev_priv->pm_rps_events) {
1399 I915_WRITE(GEN8_GT_IIR(2),
1400 tmp & dev_priv->pm_rps_events);
1401 ret = IRQ_HANDLED;
1402 gen8_rps_irq_handler(dev_priv, tmp);
1403 } else
1404 DRM_ERROR("The master control interrupt lied (PM)!\n");
1405 }
1406
1407 if (master_ctl & GEN8_GT_VECS_IRQ) {
1408 tmp = I915_READ(GEN8_GT_IIR(3));
1409 if (tmp) {
1410 I915_WRITE(GEN8_GT_IIR(3), tmp);
1411 ret = IRQ_HANDLED;
1412
1413 vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
1414 ring = &dev_priv->ring[VECS];
1415 if (vcs & GT_RENDER_USER_INTERRUPT)
1416 notify_ring(dev, ring);
1417 if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
1418 intel_execlists_handle_ctx_events(ring);
1419 } else
1420 DRM_ERROR("The master control interrupt lied (GT3)!\n");
1421 }
1422
1423 return ret;
1424 }
1425
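/*
 * Descriptive note: intel_hpd_irq_handler() below treats more than
 * HPD_STORM_THRESHOLD interrupts on a single pin within
 * HPD_STORM_DETECT_PERIOD milliseconds as an interrupt storm and marks
 * the pin HPD_MARK_DISABLED, so the hotplug work can fall back to polling.
 */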
1426 #define HPD_STORM_DETECT_PERIOD 1000
1427 #define HPD_STORM_THRESHOLD 5
1428
1429 static int pch_port_to_hotplug_shift(enum port port)
1430 {
1431 switch (port) {
1432 case PORT_A:
1433 case PORT_E:
1434 default:
1435 return -1;
1436 case PORT_B:
1437 return 0;
1438 case PORT_C:
1439 return 8;
1440 case PORT_D:
1441 return 16;
1442 }
1443 }
1444
1445 static int i915_port_to_hotplug_shift(enum port port)
1446 {
1447 switch (port) {
1448 case PORT_A:
1449 case PORT_E:
1450 default:
1451 return -1;
1452 case PORT_B:
1453 return 17;
1454 case PORT_C:
1455 return 19;
1456 case PORT_D:
1457 return 21;
1458 }
1459 }
1460
1461 static inline enum port get_port_from_pin(enum hpd_pin pin)
1462 {
1463 switch (pin) {
1464 case HPD_PORT_B:
1465 return PORT_B;
1466 case HPD_PORT_C:
1467 return PORT_C;
1468 case HPD_PORT_D:
1469 return PORT_D;
1470 default:
1471 return PORT_A; /* no hpd */
1472 }
1473 }
1474
1475 static inline void intel_hpd_irq_handler(struct drm_device *dev,
1476 u32 hotplug_trigger,
1477 u32 dig_hotplug_reg,
1478 const u32 *hpd)
1479 {
1480 struct drm_i915_private *dev_priv = dev->dev_private;
1481 int i;
1482 enum port port;
1483 bool storm_detected = false;
1484 bool queue_dig = false, queue_hp = false;
1485 u32 dig_shift;
1486 u32 dig_port_mask = 0;
1487
1488 if (!hotplug_trigger)
1489 return;
1490
1491 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
1492 hotplug_trigger, dig_hotplug_reg);
1493
1494 spin_lock(&dev_priv->irq_lock);
1495 for (i = 1; i < HPD_NUM_PINS; i++) {
1496 if (!(hpd[i] & hotplug_trigger))
1497 continue;
1498
1499 port = get_port_from_pin(i);
1500 if (port && dev_priv->hpd_irq_port[port]) {
1501 bool long_hpd;
1502
1503 if (HAS_PCH_SPLIT(dev)) {
1504 dig_shift = pch_port_to_hotplug_shift(port);
1505 long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
1506 } else {
1507 dig_shift = i915_port_to_hotplug_shift(port);
1508 long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
1509 }
1510
1511 DRM_DEBUG_DRIVER("digital hpd port %c - %s\n",
1512 port_name(port),
1513 long_hpd ? "long" : "short");
1514 /* For long HPD pulses we want the digital queue to happen,
1515 * but we still want HPD storm detection to function. */
1516 if (long_hpd) {
1517 dev_priv->long_hpd_port_mask |= (1 << port);
1518 dig_port_mask |= hpd[i];
1519 } else {
1520 /* for short HPD just trigger the digital queue */
1521 dev_priv->short_hpd_port_mask |= (1 << port);
1522 hotplug_trigger &= ~hpd[i];
1523 }
1524 queue_dig = true;
1525 }
1526 }
1527
1528 for (i = 1; i < HPD_NUM_PINS; i++) {
1529 if (hpd[i] & hotplug_trigger &&
1530 dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
1531 /*
1532 * On GMCH platforms the interrupt mask bits only
1533 * prevent irq generation, not the setting of the
1534 * hotplug bits themselves. So only WARN about unexpected
1535 * interrupts on saner platforms.
1536 */
1537 WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
1538 "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
1539 hotplug_trigger, i, hpd[i]);
1540
1541 continue;
1542 }
1543
1544 if (!(hpd[i] & hotplug_trigger) ||
1545 dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
1546 continue;
1547
1548 if (!(dig_port_mask & hpd[i])) {
1549 dev_priv->hpd_event_bits |= (1 << i);
1550 queue_hp = true;
1551 }
1552
1553 if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
1554 dev_priv->hpd_stats[i].hpd_last_jiffies
1555 + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
1556 dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
1557 dev_priv->hpd_stats[i].hpd_cnt = 0;
1558 DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
1559 } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
1560 dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
1561 dev_priv->hpd_event_bits &= ~(1 << i);
1562 DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
1563 storm_detected = true;
1564 } else {
1565 dev_priv->hpd_stats[i].hpd_cnt++;
1566 DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
1567 dev_priv->hpd_stats[i].hpd_cnt);
1568 }
1569 }
1570
1571 if (storm_detected)
1572 dev_priv->display.hpd_irq_setup(dev);
1573 spin_unlock(&dev_priv->irq_lock);
1574
1575 /*
1576 * Our hotplug handler can grab modeset locks (by calling down into the
1577 * fb helpers). Hence it must not be run on our own dev-priv->wq work
1578 * queue for otherwise the flush_work in the pageflip code will
1579 * deadlock.
1580 */
1581 if (queue_dig)
1582 queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work);
1583 if (queue_hp)
1584 schedule_work(&dev_priv->hotplug_work);
1585 }
1586
1587 static void gmbus_irq_handler(struct drm_device *dev)
1588 {
1589 struct drm_i915_private *dev_priv = dev->dev_private;
1590
1591 wake_up_all(&dev_priv->gmbus_wait_queue);
1592 }
1593
1594 static void dp_aux_irq_handler(struct drm_device *dev)
1595 {
1596 struct drm_i915_private *dev_priv = dev->dev_private;
1597
1598 wake_up_all(&dev_priv->gmbus_wait_queue);
1599 }
1600
1601 #if defined(CONFIG_DEBUG_FS)
1602 static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1603 uint32_t crc0, uint32_t crc1,
1604 uint32_t crc2, uint32_t crc3,
1605 uint32_t crc4)
1606 {
1607 struct drm_i915_private *dev_priv = dev->dev_private;
1608 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
1609 struct intel_pipe_crc_entry *entry;
1610 int head, tail;
1611
1612 spin_lock(&pipe_crc->lock);
1613
1614 if (!pipe_crc->entries) {
1615 spin_unlock(&pipe_crc->lock);
1616 DRM_ERROR("spurious interrupt\n");
1617 return;
1618 }
1619
1620 head = pipe_crc->head;
1621 tail = pipe_crc->tail;
1622
1623 if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
1624 spin_unlock(&pipe_crc->lock);
1625 DRM_ERROR("CRC buffer overflowing\n");
1626 return;
1627 }
1628
1629 entry = &pipe_crc->entries[head];
1630
1631 entry->frame = dev->driver->get_vblank_counter(dev, pipe);
1632 entry->crc[0] = crc0;
1633 entry->crc[1] = crc1;
1634 entry->crc[2] = crc2;
1635 entry->crc[3] = crc3;
1636 entry->crc[4] = crc4;
1637
1638 head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
1639 pipe_crc->head = head;
1640
1641 spin_unlock(&pipe_crc->lock);
1642
1643 wake_up_interruptible(&pipe_crc->wq);
1644 }
1645 #else
1646 static inline void
1647 display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1648 uint32_t crc0, uint32_t crc1,
1649 uint32_t crc2, uint32_t crc3,
1650 uint32_t crc4) {}
1651 #endif
1652
1653
1654 static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1655 {
1656 struct drm_i915_private *dev_priv = dev->dev_private;
1657
1658 display_pipe_crc_irq_handler(dev, pipe,
1659 I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1660 0, 0, 0, 0);
1661 }
1662
1663 static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1664 {
1665 struct drm_i915_private *dev_priv = dev->dev_private;
1666
1667 display_pipe_crc_irq_handler(dev, pipe,
1668 I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1669 I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1670 I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1671 I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
1672 I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1673 }
1674
1675 static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1676 {
1677 struct drm_i915_private *dev_priv = dev->dev_private;
1678 uint32_t res1, res2;
1679
1680 if (INTEL_INFO(dev)->gen >= 3)
1681 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1682 else
1683 res1 = 0;
1684
1685 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
1686 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1687 else
1688 res2 = 0;
1689
1690 display_pipe_crc_irq_handler(dev, pipe,
1691 I915_READ(PIPE_CRC_RES_RED(pipe)),
1692 I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1693 I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1694 res1, res2);
1695 }
1696
1697 /* The RPS events need forcewake, so we add them to a work queue and mask their
1698 * IMR bits until the work is done. Other interrupts can be processed without
1699 * the work queue. */
1700 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1701 {
1702 if (pm_iir & dev_priv->pm_rps_events) {
1703 spin_lock(&dev_priv->irq_lock);
1704 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
1705 gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1706 spin_unlock(&dev_priv->irq_lock);
1707
1708 queue_work(dev_priv->wq, &dev_priv->rps.work);
1709 }
1710
1711 if (HAS_VEBOX(dev_priv->dev)) {
1712 if (pm_iir & PM_VEBOX_USER_INTERRUPT)
1713 notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
1714
1715 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
1716 i915_handle_error(dev_priv->dev, false,
1717 "VEBOX CS error interrupt 0x%08x",
1718 pm_iir);
1719 }
1720 }
1721 }
1722
1723 static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
1724 {
1725 if (!drm_handle_vblank(dev, pipe))
1726 return false;
1727
1728 return true;
1729 }
1730
1731 static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
1732 {
1733 struct drm_i915_private *dev_priv = dev->dev_private;
1734 u32 pipe_stats[I915_MAX_PIPES] = { };
1735 int pipe;
1736
1737 spin_lock(&dev_priv->irq_lock);
1738 for_each_pipe(dev_priv, pipe) {
1739 int reg;
1740 u32 mask, iir_bit = 0;
1741
1742 /*
1743 * PIPESTAT bits get signalled even when the interrupt is
1744 * disabled with the mask bits, and some of the status bits do
1745 * not generate interrupts at all (like the underrun bit). Hence
1746 * we need to be careful that we only handle what we want to
1747 * handle.
1748 */
1749
1750 /* FIFO underruns are filtered in the underrun handler. */
1751 mask = PIPE_FIFO_UNDERRUN_STATUS;
1752
1753 switch (pipe) {
1754 case PIPE_A:
1755 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1756 break;
1757 case PIPE_B:
1758 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1759 break;
1760 case PIPE_C:
1761 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
1762 break;
1763 }
1764 if (iir & iir_bit)
1765 mask |= dev_priv->pipestat_irq_mask[pipe];
1766
1767 if (!mask)
1768 continue;
1769
1770 reg = PIPESTAT(pipe);
1771 mask |= PIPESTAT_INT_ENABLE_MASK;
1772 pipe_stats[pipe] = I915_READ(reg) & mask;
1773
1774 /*
1775 * Clear the PIPE*STAT regs before the IIR
1776 */
1777 if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
1778 PIPESTAT_INT_STATUS_MASK))
1779 I915_WRITE(reg, pipe_stats[pipe]);
1780 }
1781 spin_unlock(&dev_priv->irq_lock);
1782
1783 for_each_pipe(dev_priv, pipe) {
1784 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
1785 intel_pipe_handle_vblank(dev, pipe))
1786 intel_check_page_flip(dev, pipe);
1787
1788 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
1789 intel_prepare_page_flip(dev, pipe);
1790 intel_finish_page_flip(dev, pipe);
1791 }
1792
1793 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1794 i9xx_pipe_crc_irq_handler(dev, pipe);
1795
1796 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1797 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1798 }
1799
1800 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1801 gmbus_irq_handler(dev);
1802 }
1803
1804 static void i9xx_hpd_irq_handler(struct drm_device *dev)
1805 {
1806 struct drm_i915_private *dev_priv = dev->dev_private;
1807 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
1808
1809 if (hotplug_status) {
1810 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1811 /*
1812 * Make sure hotplug status is cleared before we clear IIR, or else we
1813 * may miss hotplug events.
1814 */
1815 POSTING_READ(PORT_HOTPLUG_STAT);
1816
1817 if (IS_G4X(dev)) {
1818 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1819
1820 intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x);
1821 } else {
1822 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1823
1824 intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915);
1825 }
1826
1827 if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
1828 hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1829 dp_aux_irq_handler(dev);
1830 }
1831 }
1832
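/*
 * Top-level interrupt handler for VLV: keep draining GTIIR, GEN6_PMIIR and
 * VLV_IIR until all three read back zero, clearing each IIR before its
 * events are processed ("find, clear, then process").
 */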
1833 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1834 {
1835 struct drm_device *dev = arg;
1836 struct drm_i915_private *dev_priv = dev->dev_private;
1837 u32 iir, gt_iir, pm_iir;
1838 irqreturn_t ret = IRQ_NONE;
1839
1840 while (true) {
1841 /* Find, clear, then process each source of interrupt */
1842
1843 gt_iir = I915_READ(GTIIR);
1844 if (gt_iir)
1845 I915_WRITE(GTIIR, gt_iir);
1846
1847 pm_iir = I915_READ(GEN6_PMIIR);
1848 if (pm_iir)
1849 I915_WRITE(GEN6_PMIIR, pm_iir);
1850
1851 iir = I915_READ(VLV_IIR);
1852 if (iir) {
1853 /* Consume port before clearing IIR or we'll miss events */
1854 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1855 i9xx_hpd_irq_handler(dev);
1856 I915_WRITE(VLV_IIR, iir);
1857 }
1858
1859 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1860 goto out;
1861
1862 ret = IRQ_HANDLED;
1863
1864 if (gt_iir)
1865 snb_gt_irq_handler(dev, dev_priv, gt_iir);
1866 if (pm_iir)
1867 gen6_rps_irq_handler(dev_priv, pm_iir);
1868 /* Call regardless, as some status bits might not be
1869 * signalled in iir */
1870 valleyview_pipestat_irq_handler(dev, iir);
1871 }
1872
1873 out:
1874 return ret;
1875 }
1876
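/*
 * Top-level interrupt handler for CHV: mask the master interrupt control,
 * service the GT and display (VLV_IIR/pipestat) sources, then re-enable the
 * master control and flush the write before looping again.
 */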
1877 static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1878 {
1879 struct drm_device *dev = arg;
1880 struct drm_i915_private *dev_priv = dev->dev_private;
1881 u32 master_ctl, iir;
1882 irqreturn_t ret = IRQ_NONE;
1883
1884 for (;;) {
1885 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1886 iir = I915_READ(VLV_IIR);
1887
1888 if (master_ctl == 0 && iir == 0)
1889 break;
1890
1891 ret = IRQ_HANDLED;
1892
1893 I915_WRITE(GEN8_MASTER_IRQ, 0);
1894
1895 /* Find, clear, then process each source of interrupt */
1896
1897 if (iir) {
1898 /* Consume port before clearing IIR or we'll miss events */
1899 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1900 i9xx_hpd_irq_handler(dev);
1901 I915_WRITE(VLV_IIR, iir);
1902 }
1903
1904 gen8_gt_irq_handler(dev, dev_priv, master_ctl);
1905
1906 /* Call regardless, as some status bits might not be
1907 * signalled in iir */
1908 valleyview_pipestat_irq_handler(dev, iir);
1909
1910 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
1911 POSTING_READ(GEN8_MASTER_IRQ);
1912 }
1913
1914 return ret;
1915 }
1916
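/* South display engine (IBX/ILK-era PCH) interrupt handler: digital hotplug,
 * AUX, GMBUS, audio, FDI and transcoder FIFO underrun events. */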
1917 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
1918 {
1919 struct drm_i915_private *dev_priv = dev->dev_private;
1920 int pipe;
1921 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1922 u32 dig_hotplug_reg;
1923
1924 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1925 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1926
1927 intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx);
1928
1929 if (pch_iir & SDE_AUDIO_POWER_MASK) {
1930 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1931 SDE_AUDIO_POWER_SHIFT);
1932 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
1933 port_name(port));
1934 }
1935
1936 if (pch_iir & SDE_AUX_MASK)
1937 dp_aux_irq_handler(dev);
1938
1939 if (pch_iir & SDE_GMBUS)
1940 gmbus_irq_handler(dev);
1941
1942 if (pch_iir & SDE_AUDIO_HDCP_MASK)
1943 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
1944
1945 if (pch_iir & SDE_AUDIO_TRANS_MASK)
1946 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
1947
1948 if (pch_iir & SDE_POISON)
1949 DRM_ERROR("PCH poison interrupt\n");
1950
1951 if (pch_iir & SDE_FDI_MASK)
1952 for_each_pipe(dev_priv, pipe)
1953 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
1954 pipe_name(pipe),
1955 I915_READ(FDI_RX_IIR(pipe)));
1956
1957 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1958 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
1959
1960 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1961 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
1962
1963 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1964 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
1965
1966 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1967 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
1968 }
1969
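/* Decode GEN7_ERR_INT: poison, per-pipe FIFO underrun and CRC-done events,
 * then clear the register. */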
1970 static void ivb_err_int_handler(struct drm_device *dev)
1971 {
1972 struct drm_i915_private *dev_priv = dev->dev_private;
1973 u32 err_int = I915_READ(GEN7_ERR_INT);
1974 enum pipe pipe;
1975
1976 if (err_int & ERR_INT_POISON)
1977 DRM_ERROR("Poison interrupt\n");
1978
1979 for_each_pipe(dev_priv, pipe) {
1980 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
1981 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1982
1983 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1984 if (IS_IVYBRIDGE(dev))
1985 ivb_pipe_crc_irq_handler(dev, pipe);
1986 else
1987 hsw_pipe_crc_irq_handler(dev, pipe);
1988 }
1989 }
1990
1991 I915_WRITE(GEN7_ERR_INT, err_int);
1992 }
1993
1994 static void cpt_serr_int_handler(struct drm_device *dev)
1995 {
1996 struct drm_i915_private *dev_priv = dev->dev_private;
1997 u32 serr_int = I915_READ(SERR_INT);
1998
1999 if (serr_int & SERR_INT_POISON)
2000 DRM_ERROR("PCH poison interrupt\n");
2001
2002 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
2003 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
2004
2005 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
2006 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
2007
2008 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
2009 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);
2010
2011 I915_WRITE(SERR_INT, serr_int);
2012 }
2013
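/* South display engine (CPT/PPT PCH) interrupt handler; SERR conditions are
 * chained through cpt_serr_int_handler(). */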
2014 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
2015 {
2016 struct drm_i915_private *dev_priv = dev->dev_private;
2017 int pipe;
2018 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
2019 u32 dig_hotplug_reg;
2020
2021 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2022 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2023
2024 intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt);
2025
2026 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
2027 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
2028 SDE_AUDIO_POWER_SHIFT_CPT);
2029 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2030 port_name(port));
2031 }
2032
2033 if (pch_iir & SDE_AUX_MASK_CPT)
2034 dp_aux_irq_handler(dev);
2035
2036 if (pch_iir & SDE_GMBUS_CPT)
2037 gmbus_irq_handler(dev);
2038
2039 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
2040 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
2041
2042 if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
2043 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
2044
2045 if (pch_iir & SDE_FDI_MASK_CPT)
2046 for_each_pipe(dev_priv, pipe)
2047 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
2048 pipe_name(pipe),
2049 I915_READ(FDI_RX_IIR(pipe)));
2050
2051 if (pch_iir & SDE_ERROR_CPT)
2052 cpt_serr_int_handler(dev);
2053 }
2054
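/* Display engine interrupts for ILK/SNB: AUX, opregion/ASLE, poison, per-pipe
 * vblank, FIFO underrun, CRC and flip-done events, plus chained PCH events. */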
2055 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
2056 {
2057 struct drm_i915_private *dev_priv = dev->dev_private;
2058 enum pipe pipe;
2059
2060 if (de_iir & DE_AUX_CHANNEL_A)
2061 dp_aux_irq_handler(dev);
2062
2063 if (de_iir & DE_GSE)
2064 intel_opregion_asle_intr(dev);
2065
2066 if (de_iir & DE_POISON)
2067 DRM_ERROR("Poison interrupt\n");
2068
2069 for_each_pipe(dev_priv, pipe) {
2070 if (de_iir & DE_PIPE_VBLANK(pipe) &&
2071 intel_pipe_handle_vblank(dev, pipe))
2072 intel_check_page_flip(dev, pipe);
2073
2074 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2075 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2076
2077 if (de_iir & DE_PIPE_CRC_DONE(pipe))
2078 i9xx_pipe_crc_irq_handler(dev, pipe);
2079
2080 /* plane/pipes map 1:1 on ilk+ */
2081 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
2082 intel_prepare_page_flip(dev, pipe);
2083 intel_finish_page_flip_plane(dev, pipe);
2084 }
2085 }
2086
2087 /* check event from PCH */
2088 if (de_iir & DE_PCH_EVENT) {
2089 u32 pch_iir = I915_READ(SDEIIR);
2090
2091 if (HAS_PCH_CPT(dev))
2092 cpt_irq_handler(dev, pch_iir);
2093 else
2094 ibx_irq_handler(dev, pch_iir);
2095
2096 /* should clear PCH hotplug event before clearing the CPU irq */
2097 I915_WRITE(SDEIIR, pch_iir);
2098 }
2099
2100 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
2101 ironlake_rps_change_irq_handler(dev);
2102 }
2103
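/* Display engine interrupts for IVB/HSW: as above, but with the IVB bit
 * layout and the error conditions collected in GEN7_ERR_INT. */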
2104 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
2105 {
2106 struct drm_i915_private *dev_priv = dev->dev_private;
2107 enum pipe pipe;
2108
2109 if (de_iir & DE_ERR_INT_IVB)
2110 ivb_err_int_handler(dev);
2111
2112 if (de_iir & DE_AUX_CHANNEL_A_IVB)
2113 dp_aux_irq_handler(dev);
2114
2115 if (de_iir & DE_GSE_IVB)
2116 intel_opregion_asle_intr(dev);
2117
2118 for_each_pipe(dev_priv, pipe) {
2119 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
2120 intel_pipe_handle_vblank(dev, pipe))
2121 intel_check_page_flip(dev, pipe);
2122
2123 /* plane/pipes map 1:1 on ilk+ */
2124 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
2125 intel_prepare_page_flip(dev, pipe);
2126 intel_finish_page_flip_plane(dev, pipe);
2127 }
2128 }
2129
2130 /* check event from PCH */
2131 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
2132 u32 pch_iir = I915_READ(SDEIIR);
2133
2134 cpt_irq_handler(dev, pch_iir);
2135
2136 /* clear PCH hotplug event before clearing the CPU irq */
2137 I915_WRITE(SDEIIR, pch_iir);
2138 }
2139 }
2140
2141 /*
2142 * To handle irqs with the minimum potential races with fresh interrupts, we:
2143 * 1 - Disable Master Interrupt Control.
2144 * 2 - Find the source(s) of the interrupt.
2145 * 3 - Clear the Interrupt Identity bits (IIR).
2146 * 4 - Process the interrupt(s) that had bits set in the IIRs.
2147 * 5 - Re-enable Master Interrupt Control.
2148 */
2149 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2150 {
2151 struct drm_device *dev = arg;
2152 struct drm_i915_private *dev_priv = dev->dev_private;
2153 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2154 irqreturn_t ret = IRQ_NONE;
2155
2156 /* We get interrupts on unclaimed registers, so check for this before we
2157 * do any I915_{READ,WRITE}. */
2158 intel_uncore_check_errors(dev);
2159
2160 /* disable master interrupt before clearing iir */
2161 de_ier = I915_READ(DEIER);
2162 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2163 POSTING_READ(DEIER);
2164
2165 /* Disable south interrupts. We'll only write to SDEIIR once, so further
2166 * interrupts will be stored on its back queue, and then we'll be
2167 * able to process them after we restore SDEIER (as soon as we restore
2168 * it, we'll get an interrupt if SDEIIR still has something to process
2169 * due to its back queue). */
2170 if (!HAS_PCH_NOP(dev)) {
2171 sde_ier = I915_READ(SDEIER);
2172 I915_WRITE(SDEIER, 0);
2173 POSTING_READ(SDEIER);
2174 }
2175
2176 /* Find, clear, then process each source of interrupt */
2177
2178 gt_iir = I915_READ(GTIIR);
2179 if (gt_iir) {
2180 I915_WRITE(GTIIR, gt_iir);
2181 ret = IRQ_HANDLED;
2182 if (INTEL_INFO(dev)->gen >= 6)
2183 snb_gt_irq_handler(dev, dev_priv, gt_iir);
2184 else
2185 ilk_gt_irq_handler(dev, dev_priv, gt_iir);
2186 }
2187
2188 de_iir = I915_READ(DEIIR);
2189 if (de_iir) {
2190 I915_WRITE(DEIIR, de_iir);
2191 ret = IRQ_HANDLED;
2192 if (INTEL_INFO(dev)->gen >= 7)
2193 ivb_display_irq_handler(dev, de_iir);
2194 else
2195 ilk_display_irq_handler(dev, de_iir);
2196 }
2197
2198 if (INTEL_INFO(dev)->gen >= 6) {
2199 u32 pm_iir = I915_READ(GEN6_PMIIR);
2200 if (pm_iir) {
2201 I915_WRITE(GEN6_PMIIR, pm_iir);
2202 ret = IRQ_HANDLED;
2203 gen6_rps_irq_handler(dev_priv, pm_iir);
2204 }
2205 }
2206
2207 I915_WRITE(DEIER, de_ier);
2208 POSTING_READ(DEIER);
2209 if (!HAS_PCH_NOP(dev)) {
2210 I915_WRITE(SDEIER, sde_ier);
2211 POSTING_READ(SDEIER);
2212 }
2213
2214 return ret;
2215 }
2216
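/*
 * Top-level interrupt handler for BDW+: disable the master interrupt control,
 * then clear and process the GT, DE misc, DE port, per-pipe and (where
 * present) PCH IIRs, and finally re-enable the master control.
 */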
2217 static irqreturn_t gen8_irq_handler(int irq, void *arg)
2218 {
2219 struct drm_device *dev = arg;
2220 struct drm_i915_private *dev_priv = dev->dev_private;
2221 u32 master_ctl;
2222 irqreturn_t ret = IRQ_NONE;
2223 uint32_t tmp = 0;
2224 enum pipe pipe;
2225
2226 master_ctl = I915_READ(GEN8_MASTER_IRQ);
2227 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2228 if (!master_ctl)
2229 return IRQ_NONE;
2230
2231 I915_WRITE(GEN8_MASTER_IRQ, 0);
2232 POSTING_READ(GEN8_MASTER_IRQ);
2233
2234 /* Find, clear, then process each source of interrupt */
2235
2236 ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);
2237
2238 if (master_ctl & GEN8_DE_MISC_IRQ) {
2239 tmp = I915_READ(GEN8_DE_MISC_IIR);
2240 if (tmp) {
2241 I915_WRITE(GEN8_DE_MISC_IIR, tmp);
2242 ret = IRQ_HANDLED;
2243 if (tmp & GEN8_DE_MISC_GSE)
2244 intel_opregion_asle_intr(dev);
2245 else
2246 DRM_ERROR("Unexpected DE Misc interrupt\n");
2247 }
2248 else
2249 DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2250 }
2251
2252 if (master_ctl & GEN8_DE_PORT_IRQ) {
2253 tmp = I915_READ(GEN8_DE_PORT_IIR);
2254 if (tmp) {
2255 I915_WRITE(GEN8_DE_PORT_IIR, tmp);
2256 ret = IRQ_HANDLED;
2257 if (tmp & GEN8_AUX_CHANNEL_A)
2258 dp_aux_irq_handler(dev);
2259 else
2260 DRM_ERROR("Unexpected DE Port interrupt\n");
2261 }
2262 else
2263 DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2264 }
2265
2266 for_each_pipe(dev_priv, pipe) {
2267 uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
2268
2269 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2270 continue;
2271
2272 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2273 if (pipe_iir) {
2274 ret = IRQ_HANDLED;
2275 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
2276
2277 if (pipe_iir & GEN8_PIPE_VBLANK &&
2278 intel_pipe_handle_vblank(dev, pipe))
2279 intel_check_page_flip(dev, pipe);
2280
2281 if (IS_GEN9(dev))
2282 flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
2283 else
2284 flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
2285
2286 if (flip_done) {
2287 intel_prepare_page_flip(dev, pipe);
2288 intel_finish_page_flip_plane(dev, pipe);
2289 }
2290
2291 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
2292 hsw_pipe_crc_irq_handler(dev, pipe);
2293
2294 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
2295 intel_cpu_fifo_underrun_irq_handler(dev_priv,
2296 pipe);
2297
2298
2299 if (IS_GEN9(dev))
2300 fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2301 else
2302 fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2303
2304 if (fault_errors)
2305 DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
2306 pipe_name(pipe),
2307 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
2308 } else
2309 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2310 }
2311
2312 if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
2313 /*
2314 * FIXME(BDW): Assume for now that the new interrupt handling
2315 * scheme also closed the SDE interrupt handling race we've seen
2316 * on older pch-split platforms. But this needs testing.
2317 */
2318 u32 pch_iir = I915_READ(SDEIIR);
2319 if (pch_iir) {
2320 I915_WRITE(SDEIIR, pch_iir);
2321 ret = IRQ_HANDLED;
2322 cpt_irq_handler(dev, pch_iir);
2323 } else
2324 DRM_ERROR("The master control interrupt lied (SDE)!\n");
2325
2326 }
2327
2328 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2329 POSTING_READ(GEN8_MASTER_IRQ);
2330
2331 return ret;
2332 }
2333
2334 static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2335 bool reset_completed)
2336 {
2337 struct intel_engine_cs *ring;
2338 int i;
2339
2340 /*
2341 * Notify all waiters for GPU completion events that reset state has
2342 * been changed, and that they need to restart their wait after
2343 * checking for potential errors (and bail out to drop locks if there is
2344 * a gpu reset pending so that i915_error_work_func can acquire them).
2345 */
2346
2347 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2348 for_each_ring(ring, dev_priv, i)
2349 wake_up_all(&ring->irq_queue);
2350
2351 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2352 wake_up_all(&dev_priv->pending_flip_queue);
2353
2354 /*
2355 * Signal tasks blocked in i915_gem_wait_for_error that the pending
2356 * reset state is cleared.
2357 */
2358 if (reset_completed)
2359 wake_up_all(&dev_priv->gpu_error.reset_queue);
2360 }
2361
2362 /**
2363 * i915_error_work_func - do process context error handling work
2364 * @work: work struct
2365 *
2366 * Fire an error uevent so userspace can see that a hang or error
2367 * was detected.
2368 */
2369 static void i915_error_work_func(struct work_struct *work)
2370 {
2371 struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
2372 work);
2373 struct drm_i915_private *dev_priv =
2374 container_of(error, struct drm_i915_private, gpu_error);
2375 struct drm_device *dev = dev_priv->dev;
2376 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2377 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2378 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
2379 int ret;
2380
2381 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
2382
2383 /*
2384 * Note that there's only one work item which does gpu resets, so we
2385 * need not worry about concurrent gpu resets potentially incrementing
2386 * error->reset_counter twice. We only need to take care of another
2387 * racing irq/hangcheck declaring the gpu dead for a second time. A
2388 * quick check for that is good enough: schedule_work ensures the
2389 * correct ordering between hang detection and this work item, and since
2390 * the reset in-progress bit is only ever set by code outside of this
2391 * work we don't need to worry about any other races.
2392 */
2393 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
2394 DRM_DEBUG_DRIVER("resetting chip\n");
2395 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
2396 reset_event);
2397
2398 /*
2399 * In most cases it's guaranteed that we get here with an RPM
2400 * reference held, for example because there is a pending GPU
2401 * request that won't finish until the reset is done. This
2402 * isn't the case at least when we get here by doing a
2403 * simulated reset via debugfs, so get an RPM reference.
2404 */
2405 intel_runtime_pm_get(dev_priv);
2406 /*
2407 * All state reset _must_ be completed before we update the
2408 * reset counter, for otherwise waiters might miss the reset
2409 * pending state and not properly drop locks, resulting in
2410 * deadlocks with the reset work.
2411 */
2412 ret = i915_reset(dev);
2413
2414 intel_display_handle_reset(dev);
2415
2416 intel_runtime_pm_put(dev_priv);
2417
2418 if (ret == 0) {
2419 /*
2420 * After all the gem state is reset, increment the reset
2421 * counter and wake up everyone waiting for the reset to
2422 * complete.
2423 *
2424 * Since unlock operations are a one-sided barrier only,
2425 * we need to insert a barrier here to order any seqno
2426 * updates before
2427 * the counter increment.
2428 */
2429 smp_mb__before_atomic();
2430 atomic_inc(&dev_priv->gpu_error.reset_counter);
2431
2432 kobject_uevent_env(&dev->primary->kdev->kobj,
2433 KOBJ_CHANGE, reset_done_event);
2434 } else {
2435 atomic_set_mask(I915_WEDGED, &error->reset_counter);
2436 }
2437
2438 /*
2439 * Note: The wake_up also serves as a memory barrier so that
2440 * waiters see the updated value of the reset counter atomic_t.
2441 */
2442 i915_error_wake_up(dev_priv, true);
2443 }
2444 }
2445
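/* Dump the error interrupt registers (EIR, IPEIR, PGTBL_ER, ...) to the log
 * and clear them; anything still stuck in EIR afterwards is masked via EMR. */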
2446 static void i915_report_and_clear_eir(struct drm_device *dev)
2447 {
2448 struct drm_i915_private *dev_priv = dev->dev_private;
2449 uint32_t instdone[I915_NUM_INSTDONE_REG];
2450 u32 eir = I915_READ(EIR);
2451 int pipe, i;
2452
2453 if (!eir)
2454 return;
2455
2456 pr_err("render error detected, EIR: 0x%08x\n", eir);
2457
2458 i915_get_extra_instdone(dev, instdone);
2459
2460 if (IS_G4X(dev)) {
2461 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2462 u32 ipeir = I915_READ(IPEIR_I965);
2463
2464 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2465 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2466 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2467 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2468 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
2469 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2470 I915_WRITE(IPEIR_I965, ipeir);
2471 POSTING_READ(IPEIR_I965);
2472 }
2473 if (eir & GM45_ERROR_PAGE_TABLE) {
2474 u32 pgtbl_err = I915_READ(PGTBL_ER);
2475 pr_err("page table error\n");
2476 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
2477 I915_WRITE(PGTBL_ER, pgtbl_err);
2478 POSTING_READ(PGTBL_ER);
2479 }
2480 }
2481
2482 if (!IS_GEN2(dev)) {
2483 if (eir & I915_ERROR_PAGE_TABLE) {
2484 u32 pgtbl_err = I915_READ(PGTBL_ER);
2485 pr_err("page table error\n");
2486 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
2487 I915_WRITE(PGTBL_ER, pgtbl_err);
2488 POSTING_READ(PGTBL_ER);
2489 }
2490 }
2491
2492 if (eir & I915_ERROR_MEMORY_REFRESH) {
2493 pr_err("memory refresh error:\n");
2494 for_each_pipe(dev_priv, pipe)
2495 pr_err("pipe %c stat: 0x%08x\n",
2496 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
2497 /* pipestat has already been acked */
2498 }
2499 if (eir & I915_ERROR_INSTRUCTION) {
2500 pr_err("instruction error\n");
2501 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
2502 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2503 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2504 if (INTEL_INFO(dev)->gen < 4) {
2505 u32 ipeir = I915_READ(IPEIR);
2506
2507 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
2508 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
2509 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
2510 I915_WRITE(IPEIR, ipeir);
2511 POSTING_READ(IPEIR);
2512 } else {
2513 u32 ipeir = I915_READ(IPEIR_I965);
2514
2515 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2516 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2517 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
2518 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2519 I915_WRITE(IPEIR_I965, ipeir);
2520 POSTING_READ(IPEIR_I965);
2521 }
2522 }
2523
2524 I915_WRITE(EIR, eir);
2525 POSTING_READ(EIR);
2526 eir = I915_READ(EIR);
2527 if (eir) {
2528 /*
2529 * some errors might have become stuck,
2530 * mask them.
2531 */
2532 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2533 I915_WRITE(EMR, I915_READ(EMR) | eir);
2534 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2535 }
2536 }
2537
2538 /**
2539 * i915_handle_error - handle an error interrupt
2540 * @dev: drm device
2541 *
2542 * Do some basic checking of register state at error interrupt time and
2543 * dump it to the syslog. Also call i915_capture_error_state() to make
2544 * sure we get a record and make it available in debugfs. Fire a uevent
2545 * so userspace knows something bad happened (should trigger collection
2546 * of a ring dump etc.).
2547 */
2548 void i915_handle_error(struct drm_device *dev, bool wedged,
2549 const char *fmt, ...)
2550 {
2551 struct drm_i915_private *dev_priv = dev->dev_private;
2552 va_list args;
2553 char error_msg[80];
2554
2555 va_start(args, fmt);
2556 vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2557 va_end(args);
2558
2559 i915_capture_error_state(dev, wedged, error_msg);
2560 i915_report_and_clear_eir(dev);
2561
2562 if (wedged) {
2563 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
2564 &dev_priv->gpu_error.reset_counter);
2565
2566 /*
2567 * Wakeup waiting processes so that the reset work function
2568 * i915_error_work_func doesn't deadlock trying to grab various
2569 * locks. By bumping the reset counter first, the woken
2570 * processes will see a reset in progress and back off,
2571 * releasing their locks and then wait for the reset completion.
2572 * We must do this for _all_ gpu waiters that might hold locks
2573 * that the reset work needs to acquire.
2574 *
2575 * Note: The wake_up serves as the required memory barrier to
2576 * ensure that the waiters see the updated value of the reset
2577 * counter atomic_t.
2578 */
2579 i915_error_wake_up(dev_priv, false);
2580 }
2581
2582 /*
2583 * Our reset work can grab modeset locks (since it needs to reset the
2584 * state of outstanding pageflips). Hence it must not be run on our own
2585 * dev_priv->wq work queue for otherwise the flush_work in the pageflip
2586 * code will deadlock.
2587 */
2588 schedule_work(&dev_priv->gpu_error.work);
2589 }
2590
2591 /* Called from drm generic code, passed 'crtc' which
2592 * we use as a pipe index
2593 */
2594 static int i915_enable_vblank(struct drm_device *dev, int pipe)
2595 {
2596 struct drm_i915_private *dev_priv = dev->dev_private;
2597 unsigned long irqflags;
2598
2599 if (!i915_pipe_enabled(dev, pipe))
2600 return -EINVAL;
2601
2602 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2603 if (INTEL_INFO(dev)->gen >= 4)
2604 i915_enable_pipestat(dev_priv, pipe,
2605 PIPE_START_VBLANK_INTERRUPT_STATUS);
2606 else
2607 i915_enable_pipestat(dev_priv, pipe,
2608 PIPE_VBLANK_INTERRUPT_STATUS);
2609 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2610
2611 return 0;
2612 }
2613
2614 static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
2615 {
2616 struct drm_i915_private *dev_priv = dev->dev_private;
2617 unsigned long irqflags;
2618 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2619 DE_PIPE_VBLANK(pipe);
2620
2621 if (!i915_pipe_enabled(dev, pipe))
2622 return -EINVAL;
2623
2624 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2625 ironlake_enable_display_irq(dev_priv, bit);
2626 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2627
2628 return 0;
2629 }
2630
2631 static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
2632 {
2633 struct drm_i915_private *dev_priv = dev->dev_private;
2634 unsigned long irqflags;
2635
2636 if (!i915_pipe_enabled(dev, pipe))
2637 return -EINVAL;
2638
2639 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2640 i915_enable_pipestat(dev_priv, pipe,
2641 PIPE_START_VBLANK_INTERRUPT_STATUS);
2642 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2643
2644 return 0;
2645 }
2646
2647 static int gen8_enable_vblank(struct drm_device *dev, int pipe)
2648 {
2649 struct drm_i915_private *dev_priv = dev->dev_private;
2650 unsigned long irqflags;
2651
2652 if (!i915_pipe_enabled(dev, pipe))
2653 return -EINVAL;
2654
2655 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2656 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
2657 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2658 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2659 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2660 return 0;
2661 }
2662
2663 /* Called from drm generic code, passed 'crtc' which
2664 * we use as a pipe index
2665 */
2666 static void i915_disable_vblank(struct drm_device *dev, int pipe)
2667 {
2668 struct drm_i915_private *dev_priv = dev->dev_private;
2669 unsigned long irqflags;
2670
2671 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2672 i915_disable_pipestat(dev_priv, pipe,
2673 PIPE_VBLANK_INTERRUPT_STATUS |
2674 PIPE_START_VBLANK_INTERRUPT_STATUS);
2675 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2676 }
2677
2678 static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
2679 {
2680 struct drm_i915_private *dev_priv = dev->dev_private;
2681 unsigned long irqflags;
2682 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2683 DE_PIPE_VBLANK(pipe);
2684
2685 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2686 ironlake_disable_display_irq(dev_priv, bit);
2687 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2688 }
2689
2690 static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
2691 {
2692 struct drm_i915_private *dev_priv = dev->dev_private;
2693 unsigned long irqflags;
2694
2695 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2696 i915_disable_pipestat(dev_priv, pipe,
2697 PIPE_START_VBLANK_INTERRUPT_STATUS);
2698 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2699 }
2700
2701 static void gen8_disable_vblank(struct drm_device *dev, int pipe)
2702 {
2703 struct drm_i915_private *dev_priv = dev->dev_private;
2704 unsigned long irqflags;
2705
2706 if (!i915_pipe_enabled(dev, pipe))
2707 return;
2708
2709 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2710 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
2711 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2712 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2713 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2714 }
2715
2716 static u32
2717 ring_last_seqno(struct intel_engine_cs *ring)
2718 {
2719 return list_entry(ring->request_list.prev,
2720 struct drm_i915_gem_request, list)->seqno;
2721 }
2722
2723 static bool
2724 ring_idle(struct intel_engine_cs *ring, u32 seqno)
2725 {
2726 return (list_empty(&ring->request_list) ||
2727 i915_seqno_passed(seqno, ring_last_seqno(ring)));
2728 }
2729
2730 static bool
2731 ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
2732 {
2733 if (INTEL_INFO(dev)->gen >= 8) {
2734 return (ipehr >> 23) == 0x1c;
2735 } else {
2736 ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
2737 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
2738 MI_SEMAPHORE_REGISTER);
2739 }
2740 }
2741
2742 static struct intel_engine_cs *
2743 semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
2744 {
2745 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2746 struct intel_engine_cs *signaller;
2747 int i;
2748
2749 if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
2750 for_each_ring(signaller, dev_priv, i) {
2751 if (ring == signaller)
2752 continue;
2753
2754 if (offset == signaller->semaphore.signal_ggtt[ring->id])
2755 return signaller;
2756 }
2757 } else {
2758 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
2759
2760 for_each_ring(signaller, dev_priv, i) {
2761 if (ring == signaller)
2762 continue;
2763
2764 if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
2765 return signaller;
2766 }
2767 }
2768
2769 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
2770 ring->id, ipehr, offset);
2771
2772 return NULL;
2773 }
2774
2775 static struct intel_engine_cs *
2776 semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
2777 {
2778 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2779 u32 cmd, ipehr, head;
2780 u64 offset = 0;
2781 int i, backwards;
2782
2783 ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
2784 if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
2785 return NULL;
2786
2787 /*
2788 * HEAD is likely pointing to the dword after the actual command,
2789 * so scan backwards until we find the MBOX. But limit it to just 3
2790 * or 4 dwords depending on the semaphore wait command size.
2791 * Note that we don't care about ACTHD here since that might
2792 * point at a batch, and semaphores are always emitted into the
2793 * ringbuffer itself.
2794 */
2795 head = I915_READ_HEAD(ring) & HEAD_ADDR;
2796 backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
2797
2798 for (i = backwards; i; --i) {
2799 /*
2800 * Be paranoid and presume the hw has gone off into the wild -
2801 * our ring is smaller than what the hardware (and hence
2802 * HEAD_ADDR) allows. Also handles wrap-around.
2803 */
2804 head &= ring->buffer->size - 1;
2805
2806 /* This here seems to blow up */
2807 cmd = ioread32(ring->buffer->virtual_start + head);
2808 if (cmd == ipehr)
2809 break;
2810
2811 head -= 4;
2812 }
2813
2814 if (!i)
2815 return NULL;
2816
2817 *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
2818 if (INTEL_INFO(ring->dev)->gen >= 8) {
2819 offset = ioread32(ring->buffer->virtual_start + head + 12);
2820 offset <<= 32;
2821 offset |= ioread32(ring->buffer->virtual_start + head + 8);
2822 }
2823 return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
2824 }
2825
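/* Check whether the semaphore this ring is waiting on has been signalled:
 * returns 1 if it has, 0 if it is still pending, and -1 if the signaller
 * cannot be determined or is itself stuck. */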
2826 static int semaphore_passed(struct intel_engine_cs *ring)
2827 {
2828 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2829 struct intel_engine_cs *signaller;
2830 u32 seqno;
2831
2832 ring->hangcheck.deadlock++;
2833
2834 signaller = semaphore_waits_for(ring, &seqno);
2835 if (signaller == NULL)
2836 return -1;
2837
2838 /* Prevent pathological recursion due to driver bugs */
2839 if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
2840 return -1;
2841
2842 if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
2843 return 1;
2844
2845 /* cursory check for an unkickable deadlock */
2846 if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
2847 semaphore_passed(signaller) < 0)
2848 return -1;
2849
2850 return 0;
2851 }
2852
2853 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2854 {
2855 struct intel_engine_cs *ring;
2856 int i;
2857
2858 for_each_ring(ring, dev_priv, i)
2859 ring->hangcheck.deadlock = 0;
2860 }
2861
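/* Classify a ring whose seqno has not advanced: still active (ACTHD moving),
 * kickable (stuck on WAIT_FOR_EVENT or a signalled semaphore), or hung. */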
2862 static enum intel_ring_hangcheck_action
2863 ring_stuck(struct intel_engine_cs *ring, u64 acthd)
2864 {
2865 struct drm_device *dev = ring->dev;
2866 struct drm_i915_private *dev_priv = dev->dev_private;
2867 u32 tmp;
2868
2869 if (acthd != ring->hangcheck.acthd) {
2870 if (acthd > ring->hangcheck.max_acthd) {
2871 ring->hangcheck.max_acthd = acthd;
2872 return HANGCHECK_ACTIVE;
2873 }
2874
2875 return HANGCHECK_ACTIVE_LOOP;
2876 }
2877
2878 if (IS_GEN2(dev))
2879 return HANGCHECK_HUNG;
2880
2881 /* Is the chip hanging on a WAIT_FOR_EVENT?
2882 * If so we can simply poke the RB_WAIT bit
2883 * and break the hang. This should work on
2884 * all but the second generation chipsets.
2885 */
2886 tmp = I915_READ_CTL(ring);
2887 if (tmp & RING_WAIT) {
2888 i915_handle_error(dev, false,
2889 "Kicking stuck wait on %s",
2890 ring->name);
2891 I915_WRITE_CTL(ring, tmp);
2892 return HANGCHECK_KICK;
2893 }
2894
2895 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
2896 switch (semaphore_passed(ring)) {
2897 default:
2898 return HANGCHECK_HUNG;
2899 case 1:
2900 i915_handle_error(dev, false,
2901 "Kicking stuck semaphore on %s",
2902 ring->name);
2903 I915_WRITE_CTL(ring, tmp);
2904 return HANGCHECK_KICK;
2905 case 0:
2906 return HANGCHECK_WAIT;
2907 }
2908 }
2909
2910 return HANGCHECK_HUNG;
2911 }
2912
2913 /**
2914 * This is called when the chip hasn't reported back with completed
2915 * batchbuffers in a long time. We keep track of per-ring seqno progress and
2916 * if there is no progress, the hangcheck score for that ring is increased.
2917 * Further, acthd is inspected to see if the ring is stuck. If it is stuck,
2918 * we kick the ring. If we see no progress on three subsequent calls
2919 * we assume the chip is wedged and try to fix it by resetting the chip.
2920 */
2921 static void i915_hangcheck_elapsed(unsigned long data)
2922 {
2923 struct drm_device *dev = (struct drm_device *)data;
2924 struct drm_i915_private *dev_priv = dev->dev_private;
2925 struct intel_engine_cs *ring;
2926 int i;
2927 int busy_count = 0, rings_hung = 0;
2928 bool stuck[I915_NUM_RINGS] = { 0 };
2929 #define BUSY 1
2930 #define KICK 5
2931 #define HUNG 20
2932
2933 if (!i915.enable_hangcheck)
2934 return;
2935
2936 for_each_ring(ring, dev_priv, i) {
2937 u64 acthd;
2938 u32 seqno;
2939 bool busy = true;
2940
2941 semaphore_clear_deadlocks(dev_priv);
2942
2943 seqno = ring->get_seqno(ring, false);
2944 acthd = intel_ring_get_active_head(ring);
2945
2946 if (ring->hangcheck.seqno == seqno) {
2947 if (ring_idle(ring, seqno)) {
2948 ring->hangcheck.action = HANGCHECK_IDLE;
2949
2950 if (waitqueue_active(&ring->irq_queue)) {
2951 /* Issue a wake-up to catch stuck h/w. */
2952 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
2953 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
2954 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
2955 ring->name);
2956 else
2957 DRM_INFO("Fake missed irq on %s\n",
2958 ring->name);
2959 wake_up_all(&ring->irq_queue);
2960 }
2961 /* Safeguard against driver failure */
2962 ring->hangcheck.score += BUSY;
2963 } else
2964 busy = false;
2965 } else {
2966 /* We always increment the hangcheck score
2967 * if the ring is busy and still processing
2968 * the same request, so that no single request
2969 * can run indefinitely (such as a chain of
2970 * batches). The only time we do not increment
2971 * the hangcheck score on this ring is if this
2972 * ring is in a legitimate wait for another
2973 * ring. In that case the waiting ring is a
2974 * victim and we want to be sure we catch the
2975 * right culprit. Then every time we do kick
2976 * the ring, add a small increment to the
2977 * score so that we can catch a batch that is
2978 * being repeatedly kicked and so responsible
2979 * for stalling the machine.
2980 */
2981 ring->hangcheck.action = ring_stuck(ring,
2982 acthd);
2983
2984 switch (ring->hangcheck.action) {
2985 case HANGCHECK_IDLE:
2986 case HANGCHECK_WAIT:
2987 case HANGCHECK_ACTIVE:
2988 break;
2989 case HANGCHECK_ACTIVE_LOOP:
2990 ring->hangcheck.score += BUSY;
2991 break;
2992 case HANGCHECK_KICK:
2993 ring->hangcheck.score += KICK;
2994 break;
2995 case HANGCHECK_HUNG:
2996 ring->hangcheck.score += HUNG;
2997 stuck[i] = true;
2998 break;
2999 }
3000 }
3001 } else {
3002 ring->hangcheck.action = HANGCHECK_ACTIVE;
3003
3004 /* Gradually reduce the count so that we catch DoS
3005 * attempts across multiple batches.
3006 */
3007 if (ring->hangcheck.score > 0)
3008 ring->hangcheck.score--;
3009
3010 ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
3011 }
3012
3013 ring->hangcheck.seqno = seqno;
3014 ring->hangcheck.acthd = acthd;
3015 busy_count += busy;
3016 }
3017
3018 for_each_ring(ring, dev_priv, i) {
3019 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
3020 DRM_INFO("%s on %s\n",
3021 stuck[i] ? "stuck" : "no progress",
3022 ring->name);
3023 rings_hung++;
3024 }
3025 }
3026
3027 if (rings_hung)
3028 return i915_handle_error(dev, true, "Ring hung");
3029
3030 if (busy_count)
3031 /* Reset timer in case the chip hangs without another
3032 * request being added */
3033 i915_queue_hangcheck(dev);
3034 }
3035
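/* (Re)arm the hangcheck timer, unless hangcheck has been disabled via the
 * i915.enable_hangcheck module parameter. */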
3036 void i915_queue_hangcheck(struct drm_device *dev)
3037 {
3038 struct drm_i915_private *dev_priv = dev->dev_private;
3039 if (!i915.enable_hangcheck)
3040 return;
3041
3042 mod_timer(&dev_priv->gpu_error.hangcheck_timer,
3043 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
3044 }
3045
3046 static void ibx_irq_reset(struct drm_device *dev)
3047 {
3048 struct drm_i915_private *dev_priv = dev->dev_private;
3049
3050 if (HAS_PCH_NOP(dev))
3051 return;
3052
3053 GEN5_IRQ_RESET(SDE);
3054
3055 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
3056 I915_WRITE(SERR_INT, 0xffffffff);
3057 }
3058
3059 /*
3060 * SDEIER is also touched by the interrupt handler to work around missed PCH
3061 * interrupts. Hence we can't update it after the interrupt handler is enabled -
3062 * instead we unconditionally enable all PCH interrupt sources here, but then
3063 * only unmask them as needed with SDEIMR.
3064 *
3065 * This function needs to be called before interrupts are enabled.
3066 */
3067 static void ibx_irq_pre_postinstall(struct drm_device *dev)
3068 {
3069 struct drm_i915_private *dev_priv = dev->dev_private;
3070
3071 if (HAS_PCH_NOP(dev))
3072 return;
3073
3074 WARN_ON(I915_READ(SDEIER) != 0);
3075 I915_WRITE(SDEIER, 0xffffffff);
3076 POSTING_READ(SDEIER);
3077 }
3078
3079 static void gen5_gt_irq_reset(struct drm_device *dev)
3080 {
3081 struct drm_i915_private *dev_priv = dev->dev_private;
3082
3083 GEN5_IRQ_RESET(GT);
3084 if (INTEL_INFO(dev)->gen >= 6)
3085 GEN5_IRQ_RESET(GEN6_PM);
3086 }
3087
3088 /* drm_dma.h hooks
3089 */
3090 static void ironlake_irq_reset(struct drm_device *dev)
3091 {
3092 struct drm_i915_private *dev_priv = dev->dev_private;
3093
3094 I915_WRITE(HWSTAM, 0xffffffff);
3095
3096 GEN5_IRQ_RESET(DE);
3097 if (IS_GEN7(dev))
3098 I915_WRITE(GEN7_ERR_INT, 0xffffffff);
3099
3100 gen5_gt_irq_reset(dev);
3101
3102 ibx_irq_reset(dev);
3103 }
3104
3105 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
3106 {
3107 enum pipe pipe;
3108
3109 I915_WRITE(PORT_HOTPLUG_EN, 0);
3110 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3111
3112 for_each_pipe(dev_priv, pipe)
3113 I915_WRITE(PIPESTAT(pipe), 0xffff);
3114
3115 GEN5_IRQ_RESET(VLV_);
3116 }
3117
3118 static void valleyview_irq_preinstall(struct drm_device *dev)
3119 {
3120 struct drm_i915_private *dev_priv = dev->dev_private;
3121
3122 /* VLV magic */
3123 I915_WRITE(VLV_IMR, 0);
3124 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
3125 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
3126 I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
3127
3128 gen5_gt_irq_reset(dev);
3129
3130 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3131
3132 vlv_display_irq_reset(dev_priv);
3133 }
3134
3135 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3136 {
3137 GEN8_IRQ_RESET_NDX(GT, 0);
3138 GEN8_IRQ_RESET_NDX(GT, 1);
3139 GEN8_IRQ_RESET_NDX(GT, 2);
3140 GEN8_IRQ_RESET_NDX(GT, 3);
3141 }
3142
3143 static void gen8_irq_reset(struct drm_device *dev)
3144 {
3145 struct drm_i915_private *dev_priv = dev->dev_private;
3146 int pipe;
3147
3148 I915_WRITE(GEN8_MASTER_IRQ, 0);
3149 POSTING_READ(GEN8_MASTER_IRQ);
3150
3151 gen8_gt_irq_reset(dev_priv);
3152
3153 for_each_pipe(dev_priv, pipe)
3154 if (intel_display_power_is_enabled(dev_priv,
3155 POWER_DOMAIN_PIPE(pipe)))
3156 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3157
3158 GEN5_IRQ_RESET(GEN8_DE_PORT_);
3159 GEN5_IRQ_RESET(GEN8_DE_MISC_);
3160 GEN5_IRQ_RESET(GEN8_PCU_);
3161
3162 ibx_irq_reset(dev);
3163 }
3164
3165 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
3166 {
3167 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
3168
3169 spin_lock_irq(&dev_priv->irq_lock);
3170 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
3171 ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
3172 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
3173 ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
3174 spin_unlock_irq(&dev_priv->irq_lock);
3175 }
3176
3177 static void cherryview_irq_preinstall(struct drm_device *dev)
3178 {
3179 struct drm_i915_private *dev_priv = dev->dev_private;
3180
3181 I915_WRITE(GEN8_MASTER_IRQ, 0);
3182 POSTING_READ(GEN8_MASTER_IRQ);
3183
3184 gen8_gt_irq_reset(dev_priv);
3185
3186 GEN5_IRQ_RESET(GEN8_PCU_);
3187
3188 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3189
3190 vlv_display_irq_reset(dev_priv);
3191 }
3192
3193 static void ibx_hpd_irq_setup(struct drm_device *dev)
3194 {
3195 struct drm_i915_private *dev_priv = dev->dev_private;
3196 struct intel_encoder *intel_encoder;
3197 u32 hotplug_irqs, hotplug, enabled_irqs = 0;
3198
3199 if (HAS_PCH_IBX(dev)) {
3200 hotplug_irqs = SDE_HOTPLUG_MASK;
3201 for_each_intel_encoder(dev, intel_encoder)
3202 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3203 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
3204 } else {
3205 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3206 for_each_intel_encoder(dev, intel_encoder)
3207 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3208 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
3209 }
3210
3211 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3212
3213 /*
3214 * Enable digital hotplug on the PCH, and configure the DP short pulse
3215 * duration to 2ms (which is the minimum in the Display Port spec)
3216 *
3217 * This register is the same on all known PCH chips.
3218 */
3219 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3220 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3221 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3222 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3223 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3224 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3225 }
3226
3227 static void ibx_irq_postinstall(struct drm_device *dev)
3228 {
3229 struct drm_i915_private *dev_priv = dev->dev_private;
3230 u32 mask;
3231
3232 if (HAS_PCH_NOP(dev))
3233 return;
3234
3235 if (HAS_PCH_IBX(dev))
3236 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3237 else
3238 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3239
3240 GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
3241 I915_WRITE(SDEIMR, ~mask);
3242 }
3243
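/* Enable the GT (render/BSD/blitter) interrupt sources and, on gen6+, the
 * PM/RPS sources; where the hardware has it, the L3 parity interrupt stays
 * permanently unmasked. */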
3244 static void gen5_gt_irq_postinstall(struct drm_device *dev)
3245 {
3246 struct drm_i915_private *dev_priv = dev->dev_private;
3247 u32 pm_irqs, gt_irqs;
3248
3249 pm_irqs = gt_irqs = 0;
3250
3251 dev_priv->gt_irq_mask = ~0;
3252 if (HAS_L3_DPF(dev)) {
3253 /* L3 parity interrupt is always unmasked. */
3254 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
3255 gt_irqs |= GT_PARITY_ERROR(dev);
3256 }
3257
3258 gt_irqs |= GT_RENDER_USER_INTERRUPT;
3259 if (IS_GEN5(dev)) {
3260 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
3261 ILK_BSD_USER_INTERRUPT;
3262 } else {
3263 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3264 }
3265
3266 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
3267
3268 if (INTEL_INFO(dev)->gen >= 6) {
3269 pm_irqs |= dev_priv->pm_rps_events;
3270
3271 if (HAS_VEBOX(dev))
3272 pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3273
3274 dev_priv->pm_irq_mask = 0xffffffff;
3275 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
3276 }
3277 }
3278
3279 static int ironlake_irq_postinstall(struct drm_device *dev)
3280 {
3281 struct drm_i915_private *dev_priv = dev->dev_private;
3282 u32 display_mask, extra_mask;
3283
3284 if (INTEL_INFO(dev)->gen >= 7) {
3285 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3286 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3287 DE_PLANEB_FLIP_DONE_IVB |
3288 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
3289 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3290 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
3291 } else {
3292 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3293 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
3294 DE_AUX_CHANNEL_A |
3295 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3296 DE_POISON);
3297 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3298 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
3299 }
3300
3301 dev_priv->irq_mask = ~display_mask;
3302
3303 I915_WRITE(HWSTAM, 0xeffe);
3304
3305 ibx_irq_pre_postinstall(dev);
3306
3307 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3308
3309 gen5_gt_irq_postinstall(dev);
3310
3311 ibx_irq_postinstall(dev);
3312
3313 if (IS_IRONLAKE_M(dev)) {
3314 /* Enable PCU event interrupts
3315 *
3316 * spinlocking not required here for correctness since interrupt
3317 * setup is guaranteed to run in single-threaded context. But we
3318 * need it to make the assert_spin_locked happy. */
3319 spin_lock_irq(&dev_priv->irq_lock);
3320 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
3321 spin_unlock_irq(&dev_priv->irq_lock);
3322 }
3323
3324 return 0;
3325 }
3326
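/* Unmask the VLV/CHV display interrupts (pipe events, port/hotplug, GMBUS and
 * the flip-done/CRC pipestat bits); called with dev_priv->irq_lock held. */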
3327 static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
3328 {
3329 u32 pipestat_mask;
3330 u32 iir_mask;
3331 enum pipe pipe;
3332
3333 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3334 PIPE_FIFO_UNDERRUN_STATUS;
3335
3336 for_each_pipe(dev_priv, pipe)
3337 I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3338 POSTING_READ(PIPESTAT(PIPE_A));
3339
3340 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3341 PIPE_CRC_DONE_INTERRUPT_STATUS;
3342
3343 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3344 for_each_pipe(dev_priv, pipe)
3345 i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
3346
3347 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3348 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3349 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3350 if (IS_CHERRYVIEW(dev_priv))
3351 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3352 dev_priv->irq_mask &= ~iir_mask;
3353
3354 I915_WRITE(VLV_IIR, iir_mask);
3355 I915_WRITE(VLV_IIR, iir_mask);
3356 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3357 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3358 POSTING_READ(VLV_IMR);
3359 }
3360
3361 static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
3362 {
3363 u32 pipestat_mask;
3364 u32 iir_mask;
3365 enum pipe pipe;
3366
3367 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3368 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3369 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3370 if (IS_CHERRYVIEW(dev_priv))
3371 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3372
3373 dev_priv->irq_mask |= iir_mask;
3374 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3375 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3376 I915_WRITE(VLV_IIR, iir_mask);
3377 I915_WRITE(VLV_IIR, iir_mask);
3378 POSTING_READ(VLV_IIR);
3379
3380 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3381 PIPE_CRC_DONE_INTERRUPT_STATUS;
3382
3383 i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3384 for_each_pipe(dev_priv, pipe)
3385 i915_disable_pipestat(dev_priv, pipe, pipestat_mask);
3386
3387 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3388 PIPE_FIFO_UNDERRUN_STATUS;
3389
3390 for_each_pipe(dev_priv, pipe)
3391 I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3392 POSTING_READ(PIPESTAT(PIPE_A));
3393 }
3394
3395 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3396 {
3397 assert_spin_locked(&dev_priv->irq_lock);
3398
3399 if (dev_priv->display_irqs_enabled)
3400 return;
3401
3402 dev_priv->display_irqs_enabled = true;
3403
3404 if (intel_irqs_enabled(dev_priv))
3405 valleyview_display_irqs_install(dev_priv);
3406 }
3407
3408 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3409 {
3410 assert_spin_locked(&dev_priv->irq_lock);
3411
3412 if (!dev_priv->display_irqs_enabled)
3413 return;
3414
3415 dev_priv->display_irqs_enabled = false;
3416
3417 if (intel_irqs_enabled(dev_priv))
3418 valleyview_display_irqs_uninstall(dev_priv);
3419 }
3420
3421 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3422 {
3423 dev_priv->irq_mask = ~0;
3424
3425 I915_WRITE(PORT_HOTPLUG_EN, 0);
3426 POSTING_READ(PORT_HOTPLUG_EN);
3427
3428 I915_WRITE(VLV_IIR, 0xffffffff);
3429 I915_WRITE(VLV_IIR, 0xffffffff);
3430 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3431 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3432 POSTING_READ(VLV_IMR);
3433
3434 /* Interrupt setup is already guaranteed to be single-threaded, this is
3435 * just to make the assert_spin_locked check happy. */
3436 spin_lock_irq(&dev_priv->irq_lock);
3437 if (dev_priv->display_irqs_enabled)
3438 valleyview_display_irqs_install(dev_priv);
3439 spin_unlock_irq(&dev_priv->irq_lock);
3440 }
3441
3442 static int valleyview_irq_postinstall(struct drm_device *dev)
3443 {
3444 struct drm_i915_private *dev_priv = dev->dev_private;
3445
3446 vlv_display_irq_postinstall(dev_priv);
3447
3448 gen5_gt_irq_postinstall(dev);
3449
3450 /* ack & enable invalid PTE error interrupts */
3451 #if 0 /* FIXME: add support to irq handler for checking these bits */
3452 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3453 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
3454 #endif
3455
3456 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3457
3458 return 0;
3459 }
3460
3461 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3462 {
3463 /* These are interrupts we'll toggle with the ring mask register */
3464 uint32_t gt_interrupts[] = {
3465 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3466 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3467 GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
3468 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3469 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3470 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3471 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3472 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3473 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3474 0,
3475 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3476 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3477 };
3478
3479 dev_priv->pm_irq_mask = 0xffffffff;
3480 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3481 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
3482 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, dev_priv->pm_rps_events);
3483 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
3484 }
3485
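/* Set up the BDW/gen9 display engine interrupts: per-pipe masks (only for
 * pipes whose power well is currently enabled) plus AUX channel A. */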
3486 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3487 {
3488 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3489 uint32_t de_pipe_enables;
3490 int pipe;
3491
3492 if (IS_GEN9(dev_priv))
3493 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
3494 GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
3495 else
3496 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
3497 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3498
3499 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3500 GEN8_PIPE_FIFO_UNDERRUN;
3501
3502 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3503 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3504 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3505
3506 for_each_pipe(dev_priv, pipe)
3507 if (intel_display_power_is_enabled(dev_priv,
3508 POWER_DOMAIN_PIPE(pipe)))
3509 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3510 dev_priv->de_irq_mask[pipe],
3511 de_pipe_enables);
3512
3513 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A);
3514 }
3515
3516 static int gen8_irq_postinstall(struct drm_device *dev)
3517 {
3518 struct drm_i915_private *dev_priv = dev->dev_private;
3519
3520 ibx_irq_pre_postinstall(dev);
3521
3522 gen8_gt_irq_postinstall(dev_priv);
3523 gen8_de_irq_postinstall(dev_priv);
3524
3525 ibx_irq_postinstall(dev);
3526
3527 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
3528 POSTING_READ(GEN8_MASTER_IRQ);
3529
3530 return 0;
3531 }
3532
3533 static int cherryview_irq_postinstall(struct drm_device *dev)
3534 {
3535 struct drm_i915_private *dev_priv = dev->dev_private;
3536 u32 enable_mask = I915_DISPLAY_PORT_INTERRUPT |
3537 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3538 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3539 I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3540 u32 pipestat_enable = PLANE_FLIP_DONE_INT_STATUS_VLV |
3541 PIPE_CRC_DONE_INTERRUPT_STATUS;
3542 int pipe;
3543
3544 /*
3545 * Leave vblank interrupts masked initially. enable/disable will
3546 * toggle them based on usage.
3547 */
3548 dev_priv->irq_mask = ~enable_mask;
3549
3550 for_each_pipe(dev_priv, pipe)
3551 I915_WRITE(PIPESTAT(pipe), 0xffff);
3552
3553 spin_lock_irq(&dev_priv->irq_lock);
3554 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3555 for_each_pipe(dev_priv, pipe)
3556 i915_enable_pipestat(dev_priv, pipe, pipestat_enable);
3557 spin_unlock_irq(&dev_priv->irq_lock);
3558
3559 I915_WRITE(VLV_IIR, 0xffffffff);
3560 I915_WRITE(VLV_IIR, 0xffffffff);
3561 I915_WRITE(VLV_IER, enable_mask);
3562 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3563 POSTING_READ(VLV_IMR);
3564
3565 gen8_gt_irq_postinstall(dev_priv);
3566
3567 I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
3568 POSTING_READ(GEN8_MASTER_IRQ);
3569
3570 return 0;
3571 }
3572
3573 static void gen8_irq_uninstall(struct drm_device *dev)
3574 {
3575 struct drm_i915_private *dev_priv = dev->dev_private;
3576
3577 if (!dev_priv)
3578 return;
3579
3580 gen8_irq_reset(dev);
3581 }
3582
3583 static void valleyview_irq_uninstall(struct drm_device *dev)
3584 {
3585 struct drm_i915_private *dev_priv = dev->dev_private;
3586
3587 if (!dev_priv)
3588 return;
3589
3590 I915_WRITE(VLV_MASTER_IER, 0);
3591
3592 gen5_gt_irq_reset(dev);
3593
3594 I915_WRITE(HWSTAM, 0xffffffff);
3595
3596 /* Interrupt setup is already guaranteed to be single-threaded, this is
3597 * just to make the assert_spin_locked check happy. */
3598 spin_lock_irq(&dev_priv->irq_lock);
3599 if (dev_priv->display_irqs_enabled)
3600 valleyview_display_irqs_uninstall(dev_priv);
3601 spin_unlock_irq(&dev_priv->irq_lock);
3602
3603 vlv_display_irq_reset(dev_priv);
3604
3605 dev_priv->irq_mask = 0;
3606 }
3607
3608 static void cherryview_irq_uninstall(struct drm_device *dev)
3609 {
3610 struct drm_i915_private *dev_priv = dev->dev_private;
3611 int pipe;
3612
3613 if (!dev_priv)
3614 return;
3615
3616 I915_WRITE(GEN8_MASTER_IRQ, 0);
3617 POSTING_READ(GEN8_MASTER_IRQ);
3618
3619 gen8_gt_irq_reset(dev_priv);
3620
3621 GEN5_IRQ_RESET(GEN8_PCU_);
3622
3623 I915_WRITE(PORT_HOTPLUG_EN, 0);
3624 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3625
3626 for_each_pipe(dev_priv, pipe)
3627 I915_WRITE(PIPESTAT(pipe), 0xffff);
3628
3629 GEN5_IRQ_RESET(VLV_);
3630 }
3631
3632 static void ironlake_irq_uninstall(struct drm_device *dev)
3633 {
3634 struct drm_i915_private *dev_priv = dev->dev_private;
3635
3636 if (!dev_priv)
3637 return;
3638
3639 ironlake_irq_reset(dev);
3640 }
3641
3642 static void i8xx_irq_preinstall(struct drm_device * dev)
3643 {
3644 struct drm_i915_private *dev_priv = dev->dev_private;
3645 int pipe;
3646
3647 for_each_pipe(dev_priv, pipe)
3648 I915_WRITE(PIPESTAT(pipe), 0);
3649 I915_WRITE16(IMR, 0xffff);
3650 I915_WRITE16(IER, 0x0);
3651 POSTING_READ16(IER);
3652 }
3653
3654 static int i8xx_irq_postinstall(struct drm_device *dev)
3655 {
3656 struct drm_i915_private *dev_priv = dev->dev_private;
3657
3658 I915_WRITE16(EMR,
3659 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3660
3661 /* Unmask the interrupts that we always want on. */
3662 dev_priv->irq_mask =
3663 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3664 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3665 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3666 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3667 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3668 I915_WRITE16(IMR, dev_priv->irq_mask);
3669
3670 I915_WRITE16(IER,
3671 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3672 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3673 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
3674 I915_USER_INTERRUPT);
3675 POSTING_READ16(IER);
3676
3677 /* Interrupt setup is already guaranteed to be single-threaded; this is
3678 * just to make the assert_spin_locked check happy. */
3679 spin_lock_irq(&dev_priv->irq_lock);
3680 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3681 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3682 spin_unlock_irq(&dev_priv->irq_lock);
3683
3684 return 0;
3685 }
3686
3687 /*
3688 * Returns true when a page flip has completed.
3689 */
3690 static bool i8xx_handle_vblank(struct drm_device *dev,
3691 int plane, int pipe, u32 iir)
3692 {
3693 struct drm_i915_private *dev_priv = dev->dev_private;
3694 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3695
3696 if (!intel_pipe_handle_vblank(dev, pipe))
3697 return false;
3698
3699 if ((iir & flip_pending) == 0)
3700 goto check_page_flip;
3701
3702 intel_prepare_page_flip(dev, plane);
3703
3704 /* We detect FlipDone by looking for the change in PendingFlip from '1'
3705 * to '0' on the following vblank, i.e. IIR has the PendingFlip
3706 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3707 * the flip is completed (no longer pending). Since this doesn't raise
3708 * an interrupt per se, we watch for the change at vblank.
3709 */
3710 if (I915_READ16(ISR) & flip_pending)
3711 goto check_page_flip;
3712
3713 intel_finish_page_flip(dev, pipe);
3714 return true;
3715
3716 check_page_flip:
3717 intel_check_page_flip(dev, pipe);
3718 return false;
3719 }
3720
3721 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3722 {
3723 struct drm_device *dev = arg;
3724 struct drm_i915_private *dev_priv = dev->dev_private;
3725 u16 iir, new_iir;
3726 u32 pipe_stats[2];
3727 int pipe;
3728 u16 flip_mask =
3729 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3730 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3731
3732 iir = I915_READ16(IIR);
3733 if (iir == 0)
3734 return IRQ_NONE;
3735
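/* The flip-pending bits are deliberately not cleared from IIR below while a
 * flip is outstanding (see the iir & ~flip_mask writes), so exclude them from
 * the loop condition as well; otherwise they alone would keep it spinning.
 */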
3736 while (iir & ~flip_mask) {
3737 /* Can't rely on pipestat interrupt bit in iir as it might
3738 * have been cleared after the pipestat interrupt was received.
3739 * It doesn't set the bit in iir again, but it still produces
3740 * interrupts (for non-MSI).
3741 */
3742 spin_lock(&dev_priv->irq_lock);
3743 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3744 i915_handle_error(dev, false,
3745 "Command parser error, iir 0x%08x",
3746 iir);
3747
3748 for_each_pipe(dev_priv, pipe) {
3749 int reg = PIPESTAT(pipe);
3750 pipe_stats[pipe] = I915_READ(reg);
3751
3752 /*
3753 * Clear the PIPE*STAT regs before the IIR
3754 */
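/* 0x8000ffff: presumably the low 16 status bits plus the FIFO underrun status bit. */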
3755 if (pipe_stats[pipe] & 0x8000ffff)
3756 I915_WRITE(reg, pipe_stats[pipe]);
3757 }
3758 spin_unlock(&dev_priv->irq_lock);
3759
3760 I915_WRITE16(IIR, iir & ~flip_mask);
3761 new_iir = I915_READ16(IIR); /* Flush posted writes */
3762
3763 i915_update_dri1_breadcrumb(dev);
3764
3765 if (iir & I915_USER_INTERRUPT)
3766 notify_ring(dev, &dev_priv->ring[RCS]);
3767
3768 for_each_pipe(dev_priv, pipe) {
3769 int plane = pipe;
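/* On gen2/3 only plane A can do FBC, so the crtc init code swaps the
 * plane/pipe assignment when FBC is present; mirror that swap here.
 */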
3770 if (HAS_FBC(dev))
3771 plane = !plane;
3772
3773 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3774 i8xx_handle_vblank(dev, plane, pipe, iir))
3775 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
3776
3777 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3778 i9xx_pipe_crc_irq_handler(dev, pipe);
3779
3780 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3781 intel_cpu_fifo_underrun_irq_handler(dev_priv,
3782 pipe);
3783 }
3784
3785 iir = new_iir;
3786 }
3787
3788 return IRQ_HANDLED;
3789 }
3790
3791 static void i8xx_irq_uninstall(struct drm_device *dev)
3792 {
3793 struct drm_i915_private *dev_priv = dev->dev_private;
3794 int pipe;
3795
3796 for_each_pipe(dev_priv, pipe) {
3797 /* Clear enable bits; then clear status bits */
3798 I915_WRITE(PIPESTAT(pipe), 0);
3799 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3800 }
3801 I915_WRITE16(IMR, 0xffff);
3802 I915_WRITE16(IER, 0x0);
3803 I915_WRITE16(IIR, I915_READ16(IIR));
3804 }
3805
3806 static void i915_irq_preinstall(struct drm_device *dev)
3807 {
3808 struct drm_i915_private *dev_priv = dev->dev_private;
3809 int pipe;
3810
3811 if (I915_HAS_HOTPLUG(dev)) {
3812 I915_WRITE(PORT_HOTPLUG_EN, 0);
3813 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3814 }
3815
3816 I915_WRITE16(HWSTAM, 0xeffe);
3817 for_each_pipe(dev_priv, pipe)
3818 I915_WRITE(PIPESTAT(pipe), 0);
3819 I915_WRITE(IMR, 0xffffffff);
3820 I915_WRITE(IER, 0x0);
3821 POSTING_READ(IER);
3822 }
3823
3824 static int i915_irq_postinstall(struct drm_device *dev)
3825 {
3826 struct drm_i915_private *dev_priv = dev->dev_private;
3827 u32 enable_mask;
3828
3829 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3830
3831 /* Unmask the interrupts that we always want on. */
3832 dev_priv->irq_mask =
3833 ~(I915_ASLE_INTERRUPT |
3834 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3835 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3836 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3837 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3838 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3839
3840 enable_mask =
3841 I915_ASLE_INTERRUPT |
3842 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3843 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3844 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
3845 I915_USER_INTERRUPT;
3846
3847 if (I915_HAS_HOTPLUG(dev)) {
3848 I915_WRITE(PORT_HOTPLUG_EN, 0);
3849 POSTING_READ(PORT_HOTPLUG_EN);
3850
3851 /* Enable in IER... */
3852 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
3853 /* and unmask in IMR */
3854 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
3855 }
3856
3857 I915_WRITE(IMR, dev_priv->irq_mask);
3858 I915_WRITE(IER, enable_mask);
3859 POSTING_READ(IER);
3860
3861 i915_enable_asle_pipestat(dev);
3862
3863 /* Interrupt setup is already guaranteed to be single-threaded; this is
3864 * just to make the assert_spin_locked check happy. */
3865 spin_lock_irq(&dev_priv->irq_lock);
3866 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3867 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3868 spin_unlock_irq(&dev_priv->irq_lock);
3869
3870 return 0;
3871 }
3872
3873 /*
3874 * Returns true when a page flip has completed.
3875 */
3876 static bool i915_handle_vblank(struct drm_device *dev,
3877 int plane, int pipe, u32 iir)
3878 {
3879 struct drm_i915_private *dev_priv = dev->dev_private;
3880 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3881
3882 if (!intel_pipe_handle_vblank(dev, pipe))
3883 return false;
3884
3885 if ((iir & flip_pending) == 0)
3886 goto check_page_flip;
3887
3888 intel_prepare_page_flip(dev, plane);
3889
3890 /* We detect FlipDone by looking for the change in PendingFlip from '1'
3891 * to '0' on the following vblank, i.e. IIR has the PendingFlip
3892 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3893 * the flip is completed (no longer pending). Since this doesn't raise
3894 * an interrupt per se, we watch for the change at vblank.
3895 */
3896 if (I915_READ(ISR) & flip_pending)
3897 goto check_page_flip;
3898
3899 intel_finish_page_flip(dev, pipe);
3900 return true;
3901
3902 check_page_flip:
3903 intel_check_page_flip(dev, pipe);
3904 return false;
3905 }
3906
3907 static irqreturn_t i915_irq_handler(int irq, void *arg)
3908 {
3909 struct drm_device *dev = arg;
3910 struct drm_i915_private *dev_priv = dev->dev_private;
3911 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
3912 u32 flip_mask =
3913 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3914 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3915 int pipe, ret = IRQ_NONE;
3916
3917 iir = I915_READ(IIR);
3918 do {
3919 bool irq_received = (iir & ~flip_mask) != 0;
3920 bool blc_event = false;
3921
3922 /* Can't rely on pipestat interrupt bit in iir as it might
3923 * have been cleared after the pipestat interrupt was received.
3924 * It doesn't set the bit in iir again, but it still produces
3925 * interrupts (for non-MSI).
3926 */
3927 spin_lock(&dev_priv->irq_lock);
3928 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3929 i915_handle_error(dev, false,
3930 "Command parser error, iir 0x%08x",
3931 iir);
3932
3933 for_each_pipe(dev_priv, pipe) {
3934 int reg = PIPESTAT(pipe);
3935 pipe_stats[pipe] = I915_READ(reg);
3936
3937 /* Clear the PIPE*STAT regs before the IIR */
3938 if (pipe_stats[pipe] & 0x8000ffff) {
3939 I915_WRITE(reg, pipe_stats[pipe]);
3940 irq_received = true;
3941 }
3942 }
3943 spin_unlock(&dev_priv->irq_lock);
3944
3945 if (!irq_received)
3946 break;
3947
3948 /* Consume port. Then clear IIR or we'll miss events */
3949 if (I915_HAS_HOTPLUG(dev) &&
3950 iir & I915_DISPLAY_PORT_INTERRUPT)
3951 i9xx_hpd_irq_handler(dev);
3952
3953 I915_WRITE(IIR, iir & ~flip_mask);
3954 new_iir = I915_READ(IIR); /* Flush posted writes */
3955
3956 if (iir & I915_USER_INTERRUPT)
3957 notify_ring(dev, &dev_priv->ring[RCS]);
3958
3959 for_each_pipe(dev_priv, pipe) {
3960 int plane = pipe;
3961 if (HAS_FBC(dev))
3962 plane = !plane;
3963
3964 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3965 i915_handle_vblank(dev, plane, pipe, iir))
3966 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
3967
3968 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
3969 blc_event = true;
3970
3971 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3972 i9xx_pipe_crc_irq_handler(dev, pipe);
3973
3974 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3975 intel_cpu_fifo_underrun_irq_handler(dev_priv,
3976 pipe);
3977 }
3978
3979 if (blc_event || (iir & I915_ASLE_INTERRUPT))
3980 intel_opregion_asle_intr(dev);
3981
3982 /* With MSI, interrupts are only generated when iir
3983 * transitions from zero to nonzero. If another bit got
3984 * set while we were handling the existing iir bits, then
3985 * we would never get another interrupt.
3986 *
3987 * This is fine on non-MSI as well, as if we hit this path
3988 * we avoid exiting the interrupt handler only to generate
3989 * another one.
3990 *
3991 * Note that for MSI this could cause a stray interrupt report
3992 * if an interrupt landed in the time between writing IIR and
3993 * the posting read. This should be rare enough to never
3994 * trigger the 99% of 100,000 interrupts test for disabling
3995 * stray interrupts.
3996 */
3997 ret = IRQ_HANDLED;
3998 iir = new_iir;
3999 } while (iir & ~flip_mask);
4000
4001 i915_update_dri1_breadcrumb(dev);
4002
4003 return ret;
4004 }
4005
4006 static void i915_irq_uninstall(struct drm_device *dev)
4007 {
4008 struct drm_i915_private *dev_priv = dev->dev_private;
4009 int pipe;
4010
4011 if (I915_HAS_HOTPLUG(dev)) {
4012 I915_WRITE(PORT_HOTPLUG_EN, 0);
4013 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4014 }
4015
4016 I915_WRITE16(HWSTAM, 0xffff);
4017 for_each_pipe(dev_priv, pipe) {
4018 /* Clear enable bits; then clear status bits */
4019 I915_WRITE(PIPESTAT(pipe), 0);
4020 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4021 }
4022 I915_WRITE(IMR, 0xffffffff);
4023 I915_WRITE(IER, 0x0);
4024
4025 I915_WRITE(IIR, I915_READ(IIR));
4026 }
4027
4028 static void i965_irq_preinstall(struct drm_device * dev)
4029 {
4030 struct drm_i915_private *dev_priv = dev->dev_private;
4031 int pipe;
4032
4033 I915_WRITE(PORT_HOTPLUG_EN, 0);
4034 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4035
4036 I915_WRITE(HWSTAM, 0xeffe);
4037 for_each_pipe(dev_priv, pipe)
4038 I915_WRITE(PIPESTAT(pipe), 0);
4039 I915_WRITE(IMR, 0xffffffff);
4040 I915_WRITE(IER, 0x0);
4041 POSTING_READ(IER);
4042 }
4043
4044 static int i965_irq_postinstall(struct drm_device *dev)
4045 {
4046 struct drm_i915_private *dev_priv = dev->dev_private;
4047 u32 enable_mask;
4048 u32 error_mask;
4049
4050 /* Unmask the interrupts that we always want on. */
4051 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
4052 I915_DISPLAY_PORT_INTERRUPT |
4053 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4054 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4055 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4056 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4057 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4058
4059 enable_mask = ~dev_priv->irq_mask;
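/* Keep the flip-pending bits unmasked in IMR so they show up in IIR for the
 * flip-completion check, but don't enable them as interrupt sources in IER;
 * completion is observed at vblank instead (see i915_handle_vblank()).
 */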
4060 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4061 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
4062 enable_mask |= I915_USER_INTERRUPT;
4063
4064 if (IS_G4X(dev))
4065 enable_mask |= I915_BSD_USER_INTERRUPT;
4066
4067 /* Interrupt setup is already guaranteed to be single-threaded; this is
4068 * just to make the assert_spin_locked check happy. */
4069 spin_lock_irq(&dev_priv->irq_lock);
4070 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4071 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4072 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4073 spin_unlock_irq(&dev_priv->irq_lock);
4074
4075 /*
4076 * Enable some error detection, note the instruction error mask
4077 * bit is reserved, so we leave it masked.
4078 */
4079 if (IS_G4X(dev)) {
4080 error_mask = ~(GM45_ERROR_PAGE_TABLE |
4081 GM45_ERROR_MEM_PRIV |
4082 GM45_ERROR_CP_PRIV |
4083 I915_ERROR_MEMORY_REFRESH);
4084 } else {
4085 error_mask = ~(I915_ERROR_PAGE_TABLE |
4086 I915_ERROR_MEMORY_REFRESH);
4087 }
4088 I915_WRITE(EMR, error_mask);
4089
4090 I915_WRITE(IMR, dev_priv->irq_mask);
4091 I915_WRITE(IER, enable_mask);
4092 POSTING_READ(IER);
4093
4094 I915_WRITE(PORT_HOTPLUG_EN, 0);
4095 POSTING_READ(PORT_HOTPLUG_EN);
4096
4097 i915_enable_asle_pipestat(dev);
4098
4099 return 0;
4100 }
4101
4102 static void i915_hpd_irq_setup(struct drm_device *dev)
4103 {
4104 struct drm_i915_private *dev_priv = dev->dev_private;
4105 struct intel_encoder *intel_encoder;
4106 u32 hotplug_en;
4107
4108 assert_spin_locked(&dev_priv->irq_lock);
4109
4110 if (I915_HAS_HOTPLUG(dev)) {
4111 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
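/* Clear all hotplug enable bits first, then re-add only the pins that are
 * currently marked HPD_ENABLED.
 */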
4112 hotplug_en &= ~HOTPLUG_INT_EN_MASK;
4113 /* Note HDMI and DP share hotplug bits */
4114 /* enable bits are the same for all generations */
4115 for_each_intel_encoder(dev, intel_encoder)
4116 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
4117 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
4118 /* Programming the CRT detection parameters tends
4119 * to generate a spurious hotplug event about three
4120 * seconds later. So just do it once.
4121 */
4122 if (IS_G4X(dev))
4123 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4124 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
4125 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4126
4127 /* Ignore TV since it's buggy */
4128 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
4129 }
4130 }
4131
4132 static irqreturn_t i965_irq_handler(int irq, void *arg)
4133 {
4134 struct drm_device *dev = arg;
4135 struct drm_i915_private *dev_priv = dev->dev_private;
4136 u32 iir, new_iir;
4137 u32 pipe_stats[I915_MAX_PIPES];
4138 int ret = IRQ_NONE, pipe;
4139 u32 flip_mask =
4140 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4141 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4142
4143 iir = I915_READ(IIR);
4144
4145 for (;;) {
4146 bool irq_received = (iir & ~flip_mask) != 0;
4147 bool blc_event = false;
4148
4149 /* Can't rely on pipestat interrupt bit in iir as it might
4150 * have been cleared after the pipestat interrupt was received.
4151 * It doesn't set the bit in iir again, but it still produces
4152 * interrupts (for non-MSI).
4153 */
4154 spin_lock(&dev_priv->irq_lock);
4155 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4156 i915_handle_error(dev, false,
4157 "Command parser error, iir 0x%08x",
4158 iir);
4159
4160 for_each_pipe(dev_priv, pipe) {
4161 int reg = PIPESTAT(pipe);
4162 pipe_stats[pipe] = I915_READ(reg);
4163
4164 /*
4165 * Clear the PIPE*STAT regs before the IIR
4166 */
4167 if (pipe_stats[pipe] & 0x8000ffff) {
4168 I915_WRITE(reg, pipe_stats[pipe]);
4169 irq_received = true;
4170 }
4171 }
4172 spin_unlock(&dev_priv->irq_lock);
4173
4174 if (!irq_received)
4175 break;
4176
4177 ret = IRQ_HANDLED;
4178
4179 /* Consume port. Then clear IIR or we'll miss events */
4180 if (iir & I915_DISPLAY_PORT_INTERRUPT)
4181 i9xx_hpd_irq_handler(dev);
4182
4183 I915_WRITE(IIR, iir & ~flip_mask);
4184 new_iir = I915_READ(IIR); /* Flush posted writes */
4185
4186 if (iir & I915_USER_INTERRUPT)
4187 notify_ring(dev, &dev_priv->ring[RCS]);
4188 if (iir & I915_BSD_USER_INTERRUPT)
4189 notify_ring(dev, &dev_priv->ring[VCS]);
4190
4191 for_each_pipe(dev_priv, pipe) {
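/* No FBC plane/pipe swap on gen4+, so plane == pipe here. */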
4192 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
4193 i915_handle_vblank(dev, pipe, pipe, iir))
4194 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
4195
4196 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4197 blc_event = true;
4198
4199 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4200 i9xx_pipe_crc_irq_handler(dev, pipe);
4201
4202 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4203 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
4204 }
4205
4206 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4207 intel_opregion_asle_intr(dev);
4208
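/* GMBUS events are reported via pipe A's PIPESTAT (enabled in
 * i965_irq_postinstall()), hence the pipe_stats[0] check.
 */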
4209 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
4210 gmbus_irq_handler(dev);
4211
4212 /* With MSI, interrupts are only generated when iir
4213 * transitions from zero to nonzero. If another bit got
4214 * set while we were handling the existing iir bits, then
4215 * we would never get another interrupt.
4216 *
4217 * This is fine on non-MSI as well, as if we hit this path
4218 * we avoid exiting the interrupt handler only to generate
4219 * another one.
4220 *
4221 * Note that for MSI this could cause a stray interrupt report
4222 * if an interrupt landed in the time between writing IIR and
4223 * the posting read. This should be rare enough to never
4224 * trigger the 99% of 100,000 interrupts test for disabling
4225 * stray interrupts.
4226 */
4227 iir = new_iir;
4228 }
4229
4230 i915_update_dri1_breadcrumb(dev);
4231
4232 return ret;
4233 }
4234
4235 static void i965_irq_uninstall(struct drm_device *dev)
4236 {
4237 struct drm_i915_private *dev_priv = dev->dev_private;
4238 int pipe;
4239
4240 if (!dev_priv)
4241 return;
4242
4243 I915_WRITE(PORT_HOTPLUG_EN, 0);
4244 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4245
4246 I915_WRITE(HWSTAM, 0xffffffff);
4247 for_each_pipe(dev_priv, pipe)
4248 I915_WRITE(PIPESTAT(pipe), 0);
4249 I915_WRITE(IMR, 0xffffffff);
4250 I915_WRITE(IER, 0x0);
4251
4252 for_each_pipe(dev_priv, pipe)
4253 I915_WRITE(PIPESTAT(pipe),
4254 I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
4255 I915_WRITE(IIR, I915_READ(IIR));
4256 }
4257
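/*
 * Delayed work that re-enables any HPD pin previously marked HPD_DISABLED
 * (e.g. after an interrupt storm), restores the connector's polling mode and
 * reprograms the hotplug registers.
 */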
4258 static void intel_hpd_irq_reenable_work(struct work_struct *work)
4259 {
4260 struct drm_i915_private *dev_priv =
4261 container_of(work, typeof(*dev_priv),
4262 hotplug_reenable_work.work);
4263 struct drm_device *dev = dev_priv->dev;
4264 struct drm_mode_config *mode_config = &dev->mode_config;
4265 int i;
4266
4267 intel_runtime_pm_get(dev_priv);
4268
4269 spin_lock_irq(&dev_priv->irq_lock);
4270 for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
4271 struct drm_connector *connector;
4272
4273 if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
4274 continue;
4275
4276 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
4277
4278 list_for_each_entry(connector, &mode_config->connector_list, head) {
4279 struct intel_connector *intel_connector = to_intel_connector(connector);
4280
4281 if (intel_connector->encoder->hpd_pin == i) {
4282 if (connector->polled != intel_connector->polled)
4283 DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
4284 connector->name);
4285 connector->polled = intel_connector->polled;
4286 if (!connector->polled)
4287 connector->polled = DRM_CONNECTOR_POLL_HPD;
4288 }
4289 }
4290 }
4291 if (dev_priv->display.hpd_irq_setup)
4292 dev_priv->display.hpd_irq_setup(dev);
4293 spin_unlock_irq(&dev_priv->irq_lock);
4294
4295 intel_runtime_pm_put(dev_priv);
4296 }
4297
4298 /**
4299 * intel_irq_init - initializes irq support
4300 * @dev_priv: i915 device instance
4301 *
4302 * This function initializes all the irq support including work items, timers
4303 * and all the vtables. It does not set up the interrupt itself, though.
4304 */
4305 void intel_irq_init(struct drm_i915_private *dev_priv)
4306 {
4307 struct drm_device *dev = dev_priv->dev;
4308
4309 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
4310 INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
4311 INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
4312 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
4313 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4314
4315 /* Let's track the enabled rps events */
4316 if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
4317 /* WaGsvRC0ResidencyMethod:vlv */
4318 dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
4319 else
4320 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4321
4322 setup_timer(&dev_priv->gpu_error.hangcheck_timer,
4323 i915_hangcheck_elapsed,
4324 (unsigned long) dev);
4325 INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
4326 intel_hpd_irq_reenable_work);
4327
4328 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
4329
4330 if (IS_GEN2(dev_priv)) {
4331 dev->max_vblank_count = 0;
4332 dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
4333 } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
4334 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4335 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
4336 } else {
4337 dev->driver->get_vblank_counter = i915_get_vblank_counter;
4338 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
4339 }
4340
4341 /*
4342 * Opt out of the vblank disable timer on everything except gen2.
4343 * Gen2 doesn't have a hardware frame counter and so depends on
4344 * vblank interrupts to produce sane vblank sequence numbers.
4345 */
4346 if (!IS_GEN2(dev_priv))
4347 dev->vblank_disable_immediate = true;
4348
4349 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
4350 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
4351 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4352 }
4353
4354 if (IS_CHERRYVIEW(dev_priv)) {
4355 dev->driver->irq_handler = cherryview_irq_handler;
4356 dev->driver->irq_preinstall = cherryview_irq_preinstall;
4357 dev->driver->irq_postinstall = cherryview_irq_postinstall;
4358 dev->driver->irq_uninstall = cherryview_irq_uninstall;
4359 dev->driver->enable_vblank = valleyview_enable_vblank;
4360 dev->driver->disable_vblank = valleyview_disable_vblank;
4361 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4362 } else if (IS_VALLEYVIEW(dev_priv)) {
4363 dev->driver->irq_handler = valleyview_irq_handler;
4364 dev->driver->irq_preinstall = valleyview_irq_preinstall;
4365 dev->driver->irq_postinstall = valleyview_irq_postinstall;
4366 dev->driver->irq_uninstall = valleyview_irq_uninstall;
4367 dev->driver->enable_vblank = valleyview_enable_vblank;
4368 dev->driver->disable_vblank = valleyview_disable_vblank;
4369 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4370 } else if (INTEL_INFO(dev_priv)->gen >= 8) {
4371 dev->driver->irq_handler = gen8_irq_handler;
4372 dev->driver->irq_preinstall = gen8_irq_reset;
4373 dev->driver->irq_postinstall = gen8_irq_postinstall;
4374 dev->driver->irq_uninstall = gen8_irq_uninstall;
4375 dev->driver->enable_vblank = gen8_enable_vblank;
4376 dev->driver->disable_vblank = gen8_disable_vblank;
4377 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
4378 } else if (HAS_PCH_SPLIT(dev)) {
4379 dev->driver->irq_handler = ironlake_irq_handler;
4380 dev->driver->irq_preinstall = ironlake_irq_reset;
4381 dev->driver->irq_postinstall = ironlake_irq_postinstall;
4382 dev->driver->irq_uninstall = ironlake_irq_uninstall;
4383 dev->driver->enable_vblank = ironlake_enable_vblank;
4384 dev->driver->disable_vblank = ironlake_disable_vblank;
4385 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
4386 } else {
4387 if (INTEL_INFO(dev_priv)->gen == 2) {
4388 dev->driver->irq_preinstall = i8xx_irq_preinstall;
4389 dev->driver->irq_postinstall = i8xx_irq_postinstall;
4390 dev->driver->irq_handler = i8xx_irq_handler;
4391 dev->driver->irq_uninstall = i8xx_irq_uninstall;
4392 } else if (INTEL_INFO(dev_priv)->gen == 3) {
4393 dev->driver->irq_preinstall = i915_irq_preinstall;
4394 dev->driver->irq_postinstall = i915_irq_postinstall;
4395 dev->driver->irq_uninstall = i915_irq_uninstall;
4396 dev->driver->irq_handler = i915_irq_handler;
4397 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4398 } else {
4399 dev->driver->irq_preinstall = i965_irq_preinstall;
4400 dev->driver->irq_postinstall = i965_irq_postinstall;
4401 dev->driver->irq_uninstall = i965_irq_uninstall;
4402 dev->driver->irq_handler = i965_irq_handler;
4403 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4404 }
4405 dev->driver->enable_vblank = i915_enable_vblank;
4406 dev->driver->disable_vblank = i915_disable_vblank;
4407 }
4408 }
4409
4410 /**
4411 * intel_hpd_init - initializes and enables hpd support
4412 * @dev_priv: i915 device instance
4413 *
4414 * This function enables the hotplug support. It requires that interrupts have
4415 * already been enabled with intel_irq_install(). From this point on hotplug and
4416 * poll requests can run concurrently with other code, so locking rules must be
4417 * obeyed.
4418 *
4419 * This is a separate step from interrupt enabling to simplify the locking rules
4420 * in the driver load and resume code.
4421 */
4422 void intel_hpd_init(struct drm_i915_private *dev_priv)
4423 {
4424 struct drm_device *dev = dev_priv->dev;
4425 struct drm_mode_config *mode_config = &dev->mode_config;
4426 struct drm_connector *connector;
4427 int i;
4428
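/* Pin 0 is HPD_NONE, so start from the first real pin. */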
4429 for (i = 1; i < HPD_NUM_PINS; i++) {
4430 dev_priv->hpd_stats[i].hpd_cnt = 0;
4431 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
4432 }
4433 list_for_each_entry(connector, &mode_config->connector_list, head) {
4434 struct intel_connector *intel_connector = to_intel_connector(connector);
4435 connector->polled = intel_connector->polled;
4436 if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
4437 connector->polled = DRM_CONNECTOR_POLL_HPD;
4438 if (intel_connector->mst_port)
4439 connector->polled = DRM_CONNECTOR_POLL_HPD;
4440 }
4441
4442 /* Interrupt setup is already guaranteed to be single-threaded; this is
4443 * just to make the assert_spin_locked checks happy. */
4444 spin_lock_irq(&dev_priv->irq_lock);
4445 if (dev_priv->display.hpd_irq_setup)
4446 dev_priv->display.hpd_irq_setup(dev);
4447 spin_unlock_irq(&dev_priv->irq_lock);
4448 }
4449
4450 /**
4451 * intel_irq_install - enables the hardware interrupt
4452 * @dev_priv: i915 device instance
4453 *
4454 * This function enables the hardware interrupt handling, but leaves the hotplug
4455 * handling still disabled. It is called after intel_irq_init().
4456 *
4457 * In the driver load and resume code we need working interrupts in a few places
4458 * but don't want to deal with the hassle of concurrent probe and hotplug
4459 * workers. Hence the split into this two-stage approach.
4460 */
4461 int intel_irq_install(struct drm_i915_private *dev_priv)
4462 {
4463 /*
4464 * We enable some interrupt sources in our postinstall hooks, so mark
4465 * interrupts as enabled _before_ actually enabling them to avoid
4466 * special cases in our ordering checks.
4467 */
4468 dev_priv->pm.irqs_enabled = true;
4469
4470 return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
4471 }
4472
4473 /**
4474 * intel_irq_uninstall - finalizes all irq handling
4475 * @dev_priv: i915 device instance
4476 *
4477 * This stops interrupt and hotplug handling and unregisters and frees all
4478 * resources acquired in the init functions.
4479 */
4480 void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4481 {
4482 drm_irq_uninstall(dev_priv->dev);
4483 intel_hpd_cancel_work(dev_priv);
4484 dev_priv->pm.irqs_enabled = false;
4485 }
4486
4487 /**
4488 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4489 * @dev_priv: i915 device instance
4490 *
4491 * This function is used to disable interrupts at runtime, both in the runtime
4492 * pm and the system suspend/resume code.
4493 */
4494 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4495 {
4496 dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
4497 dev_priv->pm.irqs_enabled = false;
4498 }
4499
4500 /**
4501 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4502 * @dev_priv: i915 device instance
4503 *
4504 * This function is used to enable interrupts at runtime, both in the runtime
4505 * pm and the system suspend/resume code.
4506 */
4507 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4508 {
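/* Mark irqs as enabled before the postinstall hook runs, for the same
 * ordering reasons as in intel_irq_install().
 */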
4509 dev_priv->pm.irqs_enabled = true;
4510 dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
4511 dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
4512 }