/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
	u32 val = I915_READ(reg); \
	if (val) { \
		WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
		     (reg), val); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
	} \
} while (0)

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	POSTING_READ(GEN8_##type##_IER(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
	I915_WRITE(type##IMR, (imr_val)); \
	I915_WRITE(type##IER, (ier_val)); \
	POSTING_READ(type##IER); \
} while (0)

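/*
 * Illustrative note on the helpers above: IIR is cleared twice because the
 * hardware can latch a second event while the first one is being acked.
 * As a hypothetical example expansion, GEN5_IRQ_INIT(GT, imr, ier) pastes
 * the GT register names and becomes roughly:
 *
 *	GEN5_ASSERT_IIR_IS_ZERO(GTIIR);
 *	I915_WRITE(GTIMR, imr);
 *	I915_WRITE(GTIER, ier);
 *	POSTING_READ(GTIER);
 *
 * with the posting read flushing the writes over the bus.
 */
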
/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (!intel_irqs_enabled(dev_priv))
		return;

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

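/*
 * Convention note: in DEIMR (as in the other IMR registers) a set bit
 * masks the interrupt, so dev_priv->irq_mask tracks the currently
 * *disabled* display interrupts. Enabling a source therefore clears its
 * bit before the mask is written back and flushed with a posting read.
 */
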
/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
		POSTING_READ(GEN6_PMIMR);
	}
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, mask);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

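/*
 * The updaters above share the two-mask pattern: interrupt_mask selects
 * which bits to touch and enabled_irq_mask which of those end up unmasked.
 * Small worked example with 4 bits, interrupt_mask = 0b0110 and
 * enabled_irq_mask = 0b0010:
 *
 *	new_val = (old & ~0b0110)	/- drop the bits being updated -/
 *		| (~0b0010 & 0b0110);	/- keep IMR set only for bit 2 -/
 *
 * i.e. bit 1 becomes unmasked, bit 2 stays masked, and bits 0 and 3 keep
 * their previous state.
 */
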
static bool ivb_can_enable_err_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	enum pipe pipe;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->cpu_fifo_underrun_disabled)
			return false;
	}

	return true;
}

/**
 * bdw_update_pm_irq - update GT interrupt 2
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 *
 * Copied from the snb function, updated with relevant register offsets
 */
static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(GEN8_GT_IMR(2), dev_priv->pm_irq_mask);
		POSTING_READ(GEN8_GT_IMR(2));
	}
}

void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	bdw_update_pm_irq(dev_priv, mask, mask);
}

void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	bdw_update_pm_irq(dev_priv, mask, 0);
}

static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->pch_fifo_underrun_disabled)
			return false;
	}

	return true;
}

void i9xx_check_fifo_underruns(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	for_each_intel_crtc(dev, crtc) {
		u32 reg = PIPESTAT(crtc->pipe);
		u32 pipestat;

		if (crtc->cpu_fifo_underrun_disabled)
			continue;

		pipestat = I915_READ(reg) & 0xffff0000;
		if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
			continue;

		I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
		POSTING_READ(reg);

		DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
					     enum pipe pipe,
					     bool enable, bool old)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0xffff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if (enable) {
		I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
		POSTING_READ(reg);
	} else {
		if (old && pipestat & PIPE_FIFO_UNDERRUN_STATUS)
			DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
	}
}

static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
						 enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
					  DE_PIPEB_FIFO_UNDERRUN;

	if (enable)
		ironlake_enable_display_irq(dev_priv, bit);
	else
		ironlake_disable_display_irq(dev_priv, bit);
}

static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe,
						  bool enable, bool old)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	if (enable) {
		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));

		if (!ivb_can_enable_err_int(dev))
			return;

		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
	} else {
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);

		if (old &&
		    I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
			DRM_ERROR("uncleared fifo underrun on pipe %c\n",
				  pipe_name(pipe));
		}
	}
}

static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	assert_spin_locked(&dev_priv->irq_lock);

	if (enable)
		dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
	else
		dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
	I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
	POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
					 uint32_t interrupt_mask,
					 uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
#define ibx_enable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), 0)

static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
		       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;

	if (enable)
		ibx_enable_display_interrupt(dev_priv, bit);
	else
		ibx_disable_display_interrupt(dev_priv, bit);
}

static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable, bool old)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(SERR_INT,
			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));

		if (!cpt_can_enable_serr_int(dev))
			return;

		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
	} else {
		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);

		if (old && I915_READ(SERR_INT) &
		    SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
			DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
				  transcoder_name(pch_transcoder));
		}
	}
}

/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts for
 * the other pipes, due to the fact that there's just one interrupt mask/enable
 * bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
						    enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	bool old;

	assert_spin_locked(&dev_priv->irq_lock);

	old = !intel_crtc->cpu_fifo_underrun_disabled;
	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	if (INTEL_INFO(dev)->gen < 5 || IS_VALLEYVIEW(dev))
		i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
	else if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
	else if (IS_GEN8(dev))
		broadwell_set_fifo_underrun_reporting(dev, pipe, enable);

	return old;
}

bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	ret = __intel_set_cpu_fifo_underrun_reporting(dev, pipe, enable);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return ret;
}

static bool __cpu_fifo_underrun_reporting_enabled(struct drm_device *dev,
						  enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	return !intel_crtc->cpu_fifo_underrun_disabled;
}

/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's just
 * one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool old;

	/*
	 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
	 * has only one pch transcoder A that all pipes can use. To avoid racy
	 * pch transcoder -> pipe lookups from interrupt code simply store the
	 * underrun statistics in crtc A. Since we never expose this anywhere
	 * nor use it outside of the fifo underrun code here using the "wrong"
	 * crtc on LPT won't cause issues.
	 */

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	old = !intel_crtc->pch_fifo_underrun_disabled;
	intel_crtc->pch_fifo_underrun_disabled = !enable;

	if (HAS_PCH_IBX(dev))
		ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
	else
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable, old);

	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return old;
}


static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}

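/*
 * PIPESTAT keeps each enable bit in the high word, 16 bits above its
 * status bit, which is where the default "status_mask << 16" in the
 * callers below comes from. VLV is the exception handled above: the
 * sprite flip-done enables and the PSR bits don't follow that shift.
 */
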
void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}

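/*
 * With the hardware counter reporting 0, vblank accounting on gen2 falls
 * back to software counting driven by the vblank interrupt (the driver
 * also advertises a zero max_vblank_count for gen2 when wiring up these
 * hooks, though that setup code is outside this excerpt).
 */
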
/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct intel_crtc *intel_crtc =
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		const struct drm_display_mode *mode =
			&intel_crtc->config.adjusted_mode;

		htotal = mode->crtc_htotal;
		hsync_start = mode->crtc_hsync_start;
		vbl_start = mode->crtc_vblank_start;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
	} else {
		enum transcoder cpu_transcoder = (enum transcoder) pipe;

		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
		hsync_start = (I915_READ(HSYNC(cpu_transcoder)) & 0x1fff) + 1;
		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
		if ((I915_READ(PIPECONF(cpu_transcoder)) &
		     PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE)
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
	}

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

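/*
 * Worked example for the cook-up above, assuming htotal = 100 and
 * hsync_start = 90 with vbl_start = 10 lines: vbl_start becomes
 * 10 * 100 - (100 - 90) = 990 pixels. The hardware frame counter only
 * increments at the start of active, so if the pixel counter already
 * reads >= 990 the new frame's vblank has begun but the counter hasn't
 * ticked yet; adding one keeps the returned value in step with vblank.
 */
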
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))

static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev))
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

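/*
 * The "% vtotal" above also handles wrap-around: e.g. with vtotal = 1125
 * and scanline_offset = 1, a raw readout of 1124 is reported as line 0 of
 * the next frame instead of an out-of-range 1125.
 */
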
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (!intel_crtc->active) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_IN_VBLANK flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	return ret;
}

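/*
 * Recap of the convention implemented above: *vpos is relative to the
 * start of active video, so a caller sees e.g. vpos = -2 for two lines
 * before the end of vblank and vpos = 5 for five lines into the frame,
 * with DRM_SCANOUTPOS_IN_VBLANK set while inside the blanking interval.
 */
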
int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc,
						     &to_intel_crtc(crtc)->config.adjusted_mode);
}

static bool intel_hpd_irq_event(struct drm_device *dev,
				struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	if (old_status == connector->status)
		return false;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
		      connector->base.id,
		      connector->name,
		      drm_get_connector_status_name(old_status),
		      drm_get_connector_status_name(connector->status));

	return true;
}

static void i915_digport_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, dig_port_work);
	unsigned long irqflags;
	u32 long_port_mask, short_port_mask;
	struct intel_digital_port *intel_dig_port;
	int i, ret;
	u32 old_bits = 0;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
	long_port_mask = dev_priv->long_hpd_port_mask;
	dev_priv->long_hpd_port_mask = 0;
	short_port_mask = dev_priv->short_hpd_port_mask;
	dev_priv->short_hpd_port_mask = 0;
	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	for (i = 0; i < I915_MAX_PORTS; i++) {
		bool valid = false;
		bool long_hpd = false;
		intel_dig_port = dev_priv->hpd_irq_port[i];
		if (!intel_dig_port || !intel_dig_port->hpd_pulse)
			continue;

		if (long_port_mask & (1 << i)) {
			valid = true;
			long_hpd = true;
		} else if (short_port_mask & (1 << i))
			valid = true;

		if (valid) {
			ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
			if (ret) {
				/* if hpd_pulse() returns true, fall back to old school hpd */
				old_bits |= (1 << intel_dig_port->base.hpd_pin);
			}
		}
	}

	if (old_bits) {
		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		dev_priv->hpd_event_bits |= old_bits;
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
		schedule_work(&dev_priv->hotplug_work);
	}
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	unsigned long irqflags;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		if (!intel_connector->encoder)
			continue;
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 connector->name);
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      connector->name, intel_encoder->hpd_pin);
		}
	}
	/* if there were no outputs to poll, poll was disabled,
	 * therefore make sure it's enabled when disabling HPD on
	 * some connectors */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_timer(&dev_priv->hotplug_reenable_timer,
			  jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		if (!intel_connector->encoder)
			continue;
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}

static void intel_hpd_irq_uninstall(struct drm_i915_private *dev_priv)
{
	del_timer_sync(&dev_priv->hotplug_reenable_timer);
}

static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}

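/*
 * Naming caveat for the above: on ILK a smaller "delay" value means a
 * higher frequency, so ips.max_delay is the numerically smallest value
 * (the performance cap) and ips.min_delay the largest. That is why the
 * busy_up branch decrements new_delay and then clamps it with a "<"
 * comparison against max_delay.
 */
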
static void notify_ring(struct drm_device *dev,
			struct intel_engine_cs *ring)
{
	if (!intel_ring_initialized(ring))
		return;

	trace_i915_gem_request_complete(ring);

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		intel_notify_mmio_flip(ring);

	wake_up_all(&ring->irq_queue);
	i915_queue_hangcheck(dev);
}

static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
			    struct intel_rps_ei *rps_ei)
{
	u32 cz_ts, cz_freq_khz;
	u32 render_count, media_count;
	u32 elapsed_render, elapsed_media, elapsed_time;
	u32 residency = 0;

	cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
	cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4);

	render_count = I915_READ(VLV_RENDER_C0_COUNT_REG);
	media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG);

	if (rps_ei->cz_clock == 0) {
		rps_ei->cz_clock = cz_ts;
		rps_ei->render_c0 = render_count;
		rps_ei->media_c0 = media_count;

		return dev_priv->rps.cur_freq;
	}

	elapsed_time = cz_ts - rps_ei->cz_clock;
	rps_ei->cz_clock = cz_ts;

	elapsed_render = render_count - rps_ei->render_c0;
	rps_ei->render_c0 = render_count;

	elapsed_media = media_count - rps_ei->media_c0;
	rps_ei->media_c0 = media_count;

	/* Convert all the counters into a common unit of milliseconds */
	elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC;
	elapsed_render /= cz_freq_khz;
	elapsed_media /= cz_freq_khz;

	/*
	 * Calculate overall C0 residency percentage
	 * only if elapsed time is non zero
	 */
	if (elapsed_time) {
		residency =
			((max(elapsed_render, elapsed_media) * 100)
			 / elapsed_time);
	}

	return residency;
}

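/*
 * Note the priming behaviour above: the first call after cz_clock was
 * zeroed only snapshots the counters and returns the current frequency;
 * a residency percentage is only computed between two real sample points.
 */
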
/**
 * vlv_calc_delay_from_C0_counters - Increase/Decrease freq based on GPU
 * busy-ness calculated from C0 counters of render & media power wells
 * @dev_priv: DRM device private
 *
 */
static u32 vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
{
	u32 residency_C0_up = 0, residency_C0_down = 0;
	u8 new_delay, adj;

	dev_priv->rps.ei_interrupt_count++;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));


	if (dev_priv->rps.up_ei.cz_clock == 0) {
		vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei);
		vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei);
		return dev_priv->rps.cur_freq;
	}


	/*
	 * To down throttle, C0 residency should be less than down threshold
	 * for continuous EI intervals. So calculate down EI counters
	 * once in VLV_INT_COUNT_FOR_DOWN_EI
	 */
	if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) {

		dev_priv->rps.ei_interrupt_count = 0;

		residency_C0_down = vlv_c0_residency(dev_priv,
						     &dev_priv->rps.down_ei);
	} else {
		residency_C0_up = vlv_c0_residency(dev_priv,
						   &dev_priv->rps.up_ei);
	}

	new_delay = dev_priv->rps.cur_freq;

	adj = dev_priv->rps.last_adj;
	/* C0 residency is greater than UP threshold. Increase Frequency */
	if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else
			adj = 1;

		if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)
			new_delay = dev_priv->rps.cur_freq + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;

	} else if (!dev_priv->rps.ei_interrupt_count &&
		   (residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) {
		if (adj < 0)
			adj *= 2;
		else
			adj = -1;
		/*
		 * This means, C0 residency is less than down threshold over
		 * a period of VLV_INT_COUNT_FOR_DOWN_EI. So, reduce the freq
		 */
		if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
			new_delay = dev_priv->rps.cur_freq + adj;
	}

	return new_delay;
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	u32 pm_iir;
	int new_delay, adj;

	spin_lock_irq(&dev_priv->irq_lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	if (INTEL_INFO(dev_priv->dev)->gen >= 8)
		gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	else {
		/* Make sure not to corrupt PMIMR state used by ringbuffer */
		gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	}
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

	if ((pm_iir & dev_priv->pm_rps_events) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	adj = dev_priv->rps.last_adj;
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else {
			/* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1;
		}
		new_delay = dev_priv->rps.cur_freq + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
		new_delay = vlv_calc_delay_from_C0_counters(dev_priv);
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else {
			/* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv->dev) ? -2 : -1;
		}
		new_delay = dev_priv->rps.cur_freq + adj;
	} else { /* unknown event */
		new_delay = dev_priv->rps.cur_freq;
	}

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay = clamp_t(int, new_delay,
			    dev_priv->rps.min_freq_softlimit,
			    dev_priv->rps.max_freq_softlimit);

	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;

	if (IS_VALLEYVIEW(dev_priv->dev))
		valleyview_set_rps(dev_priv->dev, new_delay);
	else
		gen6_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}


/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	unsigned long flags;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		u32 reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1 + (slice * 0x200);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_L3_DPF(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{

	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
		i915_handle_error(dev, false, "GT error interrupt 0x%08x",
				  gt_iir);
	}

	if (gt_iir & GT_PARITY_ERROR(dev))
		ivybridge_parity_error_irq_handler(dev, gt_iir);
}

static void gen8_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if ((pm_iir & dev_priv->pm_rps_events) == 0)
		return;

	spin_lock(&dev_priv->irq_lock);
	dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
	gen8_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
	spin_unlock(&dev_priv->irq_lock);

	queue_work(dev_priv->wq, &dev_priv->rps.work);
}

static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
				       struct drm_i915_private *dev_priv,
				       u32 master_ctl)
{
	u32 rcs, bcs, vcs;
	uint32_t tmp = 0;
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(0));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(0), tmp);
			ret = IRQ_HANDLED;
			rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
			bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
			if (rcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[RCS]);
			if (bcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[BCS]);
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(1));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(1), tmp);
			ret = IRQ_HANDLED;
			vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[VCS]);
			vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[VCS2]);
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_PM_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(2));
		if (tmp & dev_priv->pm_rps_events) {
			I915_WRITE(GEN8_GT_IIR(2),
				   tmp & dev_priv->pm_rps_events);
			ret = IRQ_HANDLED;
			gen8_rps_irq_handler(dev_priv, tmp);
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(3));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(3), tmp);
			ret = IRQ_HANDLED;
			vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, &dev_priv->ring[VECS]);
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	return ret;
}

#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5

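/*
 * More than HPD_STORM_THRESHOLD interrupts on one pin within
 * HPD_STORM_DETECT_PERIOD msecs counts as an interrupt storm; the
 * bookkeeping lives in intel_hpd_irq_handler() below.
 */
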
static int ilk_port_to_hotplug_shift(enum port port)
{
	switch (port) {
	case PORT_A:
	case PORT_E:
	default:
		return -1;
	case PORT_B:
		return 0;
	case PORT_C:
		return 8;
	case PORT_D:
		return 16;
	}
}

static int g4x_port_to_hotplug_shift(enum port port)
{
	switch (port) {
	case PORT_A:
	case PORT_E:
	default:
		return -1;
	case PORT_B:
		return 17;
	case PORT_C:
		return 19;
	case PORT_D:
		return 21;
	}
}

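/*
 * The shifts above line each port's hotplug field up so that the PORT B
 * bit definitions can be reused after shifting, e.g. for port C on PCH
 * platforms: (dig_hotplug_reg >> 8) & PORTB_HOTPLUG_LONG_DETECT, exactly
 * as done in intel_hpd_irq_handler() below. Ports A and E have no hpd
 * pin here, hence the -1.
 */
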
static inline enum port get_port_from_pin(enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_B:
		return PORT_B;
	case HPD_PORT_C:
		return PORT_C;
	case HPD_PORT_D:
		return PORT_D;
	default:
		return PORT_A; /* no hpd */
	}
}

static inline void intel_hpd_irq_handler(struct drm_device *dev,
					 u32 hotplug_trigger,
					 u32 dig_hotplug_reg,
					 const u32 *hpd)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;
	enum port port;
	bool storm_detected = false;
	bool queue_dig = false, queue_hp = false;
	u32 dig_shift;
	u32 dig_port_mask = 0;

	if (!hotplug_trigger)
		return;

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg);

	spin_lock(&dev_priv->irq_lock);
	for (i = 1; i < HPD_NUM_PINS; i++) {
		if (!(hpd[i] & hotplug_trigger))
			continue;

		port = get_port_from_pin(i);
		if (port && dev_priv->hpd_irq_port[port]) {
			bool long_hpd;

			if (IS_G4X(dev)) {
				dig_shift = g4x_port_to_hotplug_shift(port);
				long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
			} else {
				dig_shift = ilk_port_to_hotplug_shift(port);
				long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
			}

			DRM_DEBUG_DRIVER("digital hpd port %d %d\n", port, long_hpd);
			/* for long HPD pulses we want to have the digital queue happen,
			   but we still want HPD storm detection to function. */
			if (long_hpd) {
				dev_priv->long_hpd_port_mask |= (1 << port);
				dig_port_mask |= hpd[i];
			} else {
				/* for short HPD just trigger the digital queue */
				dev_priv->short_hpd_port_mask |= (1 << port);
				hotplug_trigger &= ~hpd[i];
			}
			queue_dig = true;
		}
	}

	for (i = 1; i < HPD_NUM_PINS; i++) {
		if (hpd[i] & hotplug_trigger &&
		    dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
			/*
			 * On GMCH platforms the interrupt mask bits only
			 * prevent irq generation, not the setting of the
			 * hotplug bits itself. So only WARN about unexpected
			 * interrupts on saner platforms.
			 */
			WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
				  "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
				  hotplug_trigger, i, hpd[i]);

			continue;
		}

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		if (!(dig_port_mask & hpd[i])) {
			dev_priv->hpd_event_bits |= (1 << i);
			queue_hp = true;
		}

		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			storm_detected = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
				      dev_priv->hpd_stats[i].hpd_cnt);
		}
	}

	if (storm_detected)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock(&dev_priv->irq_lock);

	/*
	 * Our hotplug handler can grab modeset locks (by calling down into the
	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
	 * queue for otherwise the flush_work in the pageflip code will
	 * deadlock.
	 */
	if (queue_dig)
		queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work);
	if (queue_hp)
		schedule_work(&dev_priv->hotplug_work);
}

static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];
1897
1898 entry->frame = dev->driver->get_vblank_counter(dev, pipe);
1899 entry->crc[0] = crc0;
1900 entry->crc[1] = crc1;
1901 entry->crc[2] = crc2;
1902 entry->crc[3] = crc3;
1903 entry->crc[4] = crc4;
1904
1905 head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
1906 pipe_crc->head = head;
1907
1908 spin_unlock(&pipe_crc->lock);
1909
1910 wake_up_interruptible(&pipe_crc->wq);
1911 }
1912 #else
1913 static inline void
1914 display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1915 uint32_t crc0, uint32_t crc1,
1916 uint32_t crc2, uint32_t crc3,
1917 uint32_t crc4) {}
1918 #endif
1919
1920
1921 static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1922 {
1923 struct drm_i915_private *dev_priv = dev->dev_private;
1924
1925 display_pipe_crc_irq_handler(dev, pipe,
1926 I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1927 0, 0, 0, 0);
1928 }
1929
1930 static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1931 {
1932 struct drm_i915_private *dev_priv = dev->dev_private;
1933
1934 display_pipe_crc_irq_handler(dev, pipe,
1935 I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1936 I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1937 I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1938 I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
1939 I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1940 }
1941
1942 static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1943 {
1944 struct drm_i915_private *dev_priv = dev->dev_private;
1945 uint32_t res1, res2;
1946
1947 if (INTEL_INFO(dev)->gen >= 3)
1948 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1949 else
1950 res1 = 0;
1951
1952 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
1953 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1954 else
1955 res2 = 0;
1956
1957 display_pipe_crc_irq_handler(dev, pipe,
1958 I915_READ(PIPE_CRC_RES_RED(pipe)),
1959 I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1960 I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1961 res1, res2);
1962 }
1963
1964 /* The RPS events need forcewake, so we add them to a work queue and mask their
1965 * IMR bits until the work is done. Other interrupts can be processed without
1966 * the work queue. */
1967 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1968 {
1969 if (pm_iir & dev_priv->pm_rps_events) {
1970 spin_lock(&dev_priv->irq_lock);
1971 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
1972 gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1973 spin_unlock(&dev_priv->irq_lock);
1974
1975 queue_work(dev_priv->wq, &dev_priv->rps.work);
1976 }
1977
1978 if (HAS_VEBOX(dev_priv->dev)) {
1979 if (pm_iir & PM_VEBOX_USER_INTERRUPT)
1980 notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
1981
1982 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
1983 i915_handle_error(dev_priv->dev, false,
1984 "VEBOX CS error interrupt 0x%08x",
1985 pm_iir);
1986 }
1987 }
1988 }
1989
1990 static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
1991 {
1992 struct intel_crtc *crtc;
1993
1994 if (!drm_handle_vblank(dev, pipe))
1995 return false;
1996
1997 crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
1998 wake_up(&crtc->vbl_wait);
1999
2000 return true;
2001 }
2002
2003 static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
2004 {
2005 struct drm_i915_private *dev_priv = dev->dev_private;
2006 u32 pipe_stats[I915_MAX_PIPES] = { };
2007 int pipe;
2008
2009 spin_lock(&dev_priv->irq_lock);
2010 for_each_pipe(pipe) {
2011 int reg;
2012 u32 mask, iir_bit = 0;
2013
2014 /*
2015 * PIPESTAT bits get signalled even when the interrupt is
2016 * disabled with the mask bits, and some of the status bits do
2017 * not generate interrupts at all (like the underrun bit). Hence
2018 * we need to be careful that we only handle what we want to
2019 * handle.
2020 */
2021 mask = 0;
2022 if (__cpu_fifo_underrun_reporting_enabled(dev, pipe))
2023 mask |= PIPE_FIFO_UNDERRUN_STATUS;
2024
2025 switch (pipe) {
2026 case PIPE_A:
2027 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
2028 break;
2029 case PIPE_B:
2030 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
2031 break;
2032 case PIPE_C:
2033 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
2034 break;
2035 }
2036 if (iir & iir_bit)
2037 mask |= dev_priv->pipestat_irq_mask[pipe];
2038
2039 if (!mask)
2040 continue;
2041
2042 reg = PIPESTAT(pipe);
2043 mask |= PIPESTAT_INT_ENABLE_MASK;
2044 pipe_stats[pipe] = I915_READ(reg) & mask;
2045
2046 /*
2047 * Clear the PIPE*STAT regs before the IIR
2048 */
2049 if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
2050 PIPESTAT_INT_STATUS_MASK))
2051 I915_WRITE(reg, pipe_stats[pipe]);
2052 }
2053 spin_unlock(&dev_priv->irq_lock);
2054
2055 for_each_pipe(pipe) {
2056 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
2057 intel_pipe_handle_vblank(dev, pipe);
2058
2059 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
2060 intel_prepare_page_flip(dev, pipe);
2061 intel_finish_page_flip(dev, pipe);
2062 }
2063
2064 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
2065 i9xx_pipe_crc_irq_handler(dev, pipe);
2066
2067 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
2068 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
2069 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
2070 }
2071
2072 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
2073 gmbus_irq_handler(dev);
2074 }
2075
2076 static void i9xx_hpd_irq_handler(struct drm_device *dev)
2077 {
2078 struct drm_i915_private *dev_priv = dev->dev_private;
2079 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
2080
2081 if (hotplug_status) {
2082 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2083 /*
2084 * Make sure hotplug status is cleared before we clear IIR, or else we
2085 * may miss hotplug events.
2086 */
2087 POSTING_READ(PORT_HOTPLUG_STAT);
2088
2089 if (IS_G4X(dev)) {
2090 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
2091
2092 intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x);
2093 } else {
2094 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
2095
2096 intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915);
2097 }
2098
2099 if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
2100 hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
2101 dp_aux_irq_handler(dev);
2102 }
2103 }
2104
2105 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
2106 {
2107 struct drm_device *dev = arg;
2108 struct drm_i915_private *dev_priv = dev->dev_private;
2109 u32 iir, gt_iir, pm_iir;
2110 irqreturn_t ret = IRQ_NONE;
2111
2112 while (true) {
2113 /* Find, clear, then process each source of interrupt */
2114
2115 gt_iir = I915_READ(GTIIR);
2116 if (gt_iir)
2117 I915_WRITE(GTIIR, gt_iir);
2118
2119 pm_iir = I915_READ(GEN6_PMIIR);
2120 if (pm_iir)
2121 I915_WRITE(GEN6_PMIIR, pm_iir);
2122
2123 iir = I915_READ(VLV_IIR);
2124 if (iir) {
2125 /* Consume port before clearing IIR or we'll miss events */
2126 if (iir & I915_DISPLAY_PORT_INTERRUPT)
2127 i9xx_hpd_irq_handler(dev);
2128 I915_WRITE(VLV_IIR, iir);
2129 }
2130
2131 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
2132 goto out;
2133
2134 ret = IRQ_HANDLED;
2135
2136 if (gt_iir)
2137 snb_gt_irq_handler(dev, dev_priv, gt_iir);
2138 if (pm_iir)
2139 gen6_rps_irq_handler(dev_priv, pm_iir);
2140 /* Call regardless, as some status bits might not be
2141 * signalled in iir */
2142 valleyview_pipestat_irq_handler(dev, iir);
2143 }
2144
2145 out:
2146 return ret;
2147 }
2148
2149 static irqreturn_t cherryview_irq_handler(int irq, void *arg)
2150 {
2151 struct drm_device *dev = arg;
2152 struct drm_i915_private *dev_priv = dev->dev_private;
2153 u32 master_ctl, iir;
2154 irqreturn_t ret = IRQ_NONE;
2155
2156 for (;;) {
2157 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
2158 iir = I915_READ(VLV_IIR);
2159
2160 if (master_ctl == 0 && iir == 0)
2161 break;
2162
2163 ret = IRQ_HANDLED;
2164
2165 I915_WRITE(GEN8_MASTER_IRQ, 0);
2166
2167 /* Find, clear, then process each source of interrupt */
2168
2169 if (iir) {
2170 /* Consume port before clearing IIR or we'll miss events */
2171 if (iir & I915_DISPLAY_PORT_INTERRUPT)
2172 i9xx_hpd_irq_handler(dev);
2173 I915_WRITE(VLV_IIR, iir);
2174 }
2175
2176 gen8_gt_irq_handler(dev, dev_priv, master_ctl);
2177
2178 /* Call regardless, as some status bits might not be
2179 * signalled in iir */
2180 valleyview_pipestat_irq_handler(dev, iir);
2181
2182 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
2183 POSTING_READ(GEN8_MASTER_IRQ);
2184 }
2185
2186 return ret;
2187 }
2188
2189 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
2190 {
2191 struct drm_i915_private *dev_priv = dev->dev_private;
2192 int pipe;
2193 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
2194 u32 dig_hotplug_reg;
2195
2196 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2197 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2198
2199 intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx);
2200
2201 if (pch_iir & SDE_AUDIO_POWER_MASK) {
2202 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
2203 SDE_AUDIO_POWER_SHIFT);
2204 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
2205 port_name(port));
2206 }
2207
2208 if (pch_iir & SDE_AUX_MASK)
2209 dp_aux_irq_handler(dev);
2210
2211 if (pch_iir & SDE_GMBUS)
2212 gmbus_irq_handler(dev);
2213
2214 if (pch_iir & SDE_AUDIO_HDCP_MASK)
2215 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
2216
2217 if (pch_iir & SDE_AUDIO_TRANS_MASK)
2218 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
2219
2220 if (pch_iir & SDE_POISON)
2221 DRM_ERROR("PCH poison interrupt\n");
2222
2223 if (pch_iir & SDE_FDI_MASK)
2224 for_each_pipe(pipe)
2225 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
2226 pipe_name(pipe),
2227 I915_READ(FDI_RX_IIR(pipe)));
2228
2229 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
2230 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
2231
2232 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
2233 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
2234
2235 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
2236 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
2237 false))
2238 DRM_ERROR("PCH transcoder A FIFO underrun\n");
2239
2240 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
2241 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
2242 false))
2243 DRM_ERROR("PCH transcoder B FIFO underrun\n");
2244 }
2245
2246 static void ivb_err_int_handler(struct drm_device *dev)
2247 {
2248 struct drm_i915_private *dev_priv = dev->dev_private;
2249 u32 err_int = I915_READ(GEN7_ERR_INT);
2250 enum pipe pipe;
2251
2252 if (err_int & ERR_INT_POISON)
2253 DRM_ERROR("Poison interrupt\n");
2254
2255 for_each_pipe(pipe) {
2256 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
2257 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
2258 false))
2259 DRM_ERROR("Pipe %c FIFO underrun\n",
2260 pipe_name(pipe));
2261 }
2262
2263 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
2264 if (IS_IVYBRIDGE(dev))
2265 ivb_pipe_crc_irq_handler(dev, pipe);
2266 else
2267 hsw_pipe_crc_irq_handler(dev, pipe);
2268 }
2269 }
2270
2271 I915_WRITE(GEN7_ERR_INT, err_int);
2272 }
2273
2274 static void cpt_serr_int_handler(struct drm_device *dev)
2275 {
2276 struct drm_i915_private *dev_priv = dev->dev_private;
2277 u32 serr_int = I915_READ(SERR_INT);
2278
2279 if (serr_int & SERR_INT_POISON)
2280 DRM_ERROR("PCH poison interrupt\n");
2281
2282 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
2283 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
2284 false))
2285 DRM_ERROR("PCH transcoder A FIFO underrun\n");
2286
2287 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
2288 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
2289 false))
2290 DRM_ERROR("PCH transcoder B FIFO underrun\n");
2291
2292 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
2293 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
2294 false))
2295 DRM_ERROR("PCH transcoder C FIFO underrun\n");
2296
2297 I915_WRITE(SERR_INT, serr_int);
2298 }
2299
2300 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
2301 {
2302 struct drm_i915_private *dev_priv = dev->dev_private;
2303 int pipe;
2304 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
2305 u32 dig_hotplug_reg;
2306
2307 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2308 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2309
2310 intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt);
2311
2312 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
2313 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
2314 SDE_AUDIO_POWER_SHIFT_CPT);
2315 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2316 port_name(port));
2317 }
2318
2319 if (pch_iir & SDE_AUX_MASK_CPT)
2320 dp_aux_irq_handler(dev);
2321
2322 if (pch_iir & SDE_GMBUS_CPT)
2323 gmbus_irq_handler(dev);
2324
2325 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
2326 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
2327
2328 if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
2329 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
2330
2331 if (pch_iir & SDE_FDI_MASK_CPT)
2332 for_each_pipe(pipe)
2333 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
2334 pipe_name(pipe),
2335 I915_READ(FDI_RX_IIR(pipe)));
2336
2337 if (pch_iir & SDE_ERROR_CPT)
2338 cpt_serr_int_handler(dev);
2339 }
2340
2341 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
2342 {
2343 struct drm_i915_private *dev_priv = dev->dev_private;
2344 enum pipe pipe;
2345
2346 if (de_iir & DE_AUX_CHANNEL_A)
2347 dp_aux_irq_handler(dev);
2348
2349 if (de_iir & DE_GSE)
2350 intel_opregion_asle_intr(dev);
2351
2352 if (de_iir & DE_POISON)
2353 DRM_ERROR("Poison interrupt\n");
2354
2355 for_each_pipe(pipe) {
2356 if (de_iir & DE_PIPE_VBLANK(pipe))
2357 intel_pipe_handle_vblank(dev, pipe);
2358
2359 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2360 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
2361 DRM_ERROR("Pipe %c FIFO underrun\n",
2362 pipe_name(pipe));
2363
2364 if (de_iir & DE_PIPE_CRC_DONE(pipe))
2365 i9xx_pipe_crc_irq_handler(dev, pipe);
2366
2367 /* plane/pipes map 1:1 on ilk+ */
2368 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
2369 intel_prepare_page_flip(dev, pipe);
2370 intel_finish_page_flip_plane(dev, pipe);
2371 }
2372 }
2373
2374 /* check event from PCH */
2375 if (de_iir & DE_PCH_EVENT) {
2376 u32 pch_iir = I915_READ(SDEIIR);
2377
2378 if (HAS_PCH_CPT(dev))
2379 cpt_irq_handler(dev, pch_iir);
2380 else
2381 ibx_irq_handler(dev, pch_iir);
2382
2383 /* should clear PCH hotplug event before clear CPU irq */
2384 I915_WRITE(SDEIIR, pch_iir);
2385 }
2386
2387 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
2388 ironlake_rps_change_irq_handler(dev);
2389 }
2390
2391 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
2392 {
2393 struct drm_i915_private *dev_priv = dev->dev_private;
2394 enum pipe pipe;
2395
2396 if (de_iir & DE_ERR_INT_IVB)
2397 ivb_err_int_handler(dev);
2398
2399 if (de_iir & DE_AUX_CHANNEL_A_IVB)
2400 dp_aux_irq_handler(dev);
2401
2402 if (de_iir & DE_GSE_IVB)
2403 intel_opregion_asle_intr(dev);
2404
2405 for_each_pipe(pipe) {
2406 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
2407 intel_pipe_handle_vblank(dev, pipe);
2408
2409 /* plane/pipes map 1:1 on ilk+ */
2410 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
2411 intel_prepare_page_flip(dev, pipe);
2412 intel_finish_page_flip_plane(dev, pipe);
2413 }
2414 }
2415
2416 /* check event from PCH */
2417 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
2418 u32 pch_iir = I915_READ(SDEIIR);
2419
2420 cpt_irq_handler(dev, pch_iir);
2421
2422 /* clear PCH hotplug event before clear CPU irq */
2423 I915_WRITE(SDEIIR, pch_iir);
2424 }
2425 }
2426
2427 /*
2428 * To handle irqs with the minimum potential races with fresh interrupts, we:
2429 * 1 - Disable Master Interrupt Control.
2430 * 2 - Find the source(s) of the interrupt.
2431 * 3 - Clear the Interrupt Identity bits (IIR).
2432 * 4 - Process the interrupt(s) that had bits set in the IIRs.
2433 * 5 - Re-enable Master Interrupt Control.
2434 */
2435 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2436 {
2437 struct drm_device *dev = arg;
2438 struct drm_i915_private *dev_priv = dev->dev_private;
2439 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2440 irqreturn_t ret = IRQ_NONE;
2441
2442 /* We get interrupts on unclaimed registers, so check for this before we
2443 * do any I915_{READ,WRITE}. */
2444 intel_uncore_check_errors(dev);
2445
2446 /* disable master interrupt before clearing iir */
2447 de_ier = I915_READ(DEIER);
2448 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2449 POSTING_READ(DEIER);
2450
2451 /* Disable south interrupts. We'll only write to SDEIIR once, so further
2452 * interrupts will will be stored on its back queue, and then we'll be
2453 * able to process them after we restore SDEIER (as soon as we restore
2454 * it, we'll get an interrupt if SDEIIR still has something to process
2455 * due to its back queue). */
2456 if (!HAS_PCH_NOP(dev)) {
2457 sde_ier = I915_READ(SDEIER);
2458 I915_WRITE(SDEIER, 0);
2459 POSTING_READ(SDEIER);
2460 }
2461
2462 /* Find, clear, then process each source of interrupt */
2463
2464 gt_iir = I915_READ(GTIIR);
2465 if (gt_iir) {
2466 I915_WRITE(GTIIR, gt_iir);
2467 ret = IRQ_HANDLED;
2468 if (INTEL_INFO(dev)->gen >= 6)
2469 snb_gt_irq_handler(dev, dev_priv, gt_iir);
2470 else
2471 ilk_gt_irq_handler(dev, dev_priv, gt_iir);
2472 }
2473
2474 de_iir = I915_READ(DEIIR);
2475 if (de_iir) {
2476 I915_WRITE(DEIIR, de_iir);
2477 ret = IRQ_HANDLED;
2478 if (INTEL_INFO(dev)->gen >= 7)
2479 ivb_display_irq_handler(dev, de_iir);
2480 else
2481 ilk_display_irq_handler(dev, de_iir);
2482 }
2483
2484 if (INTEL_INFO(dev)->gen >= 6) {
2485 u32 pm_iir = I915_READ(GEN6_PMIIR);
2486 if (pm_iir) {
2487 I915_WRITE(GEN6_PMIIR, pm_iir);
2488 ret = IRQ_HANDLED;
2489 gen6_rps_irq_handler(dev_priv, pm_iir);
2490 }
2491 }
2492
2493 I915_WRITE(DEIER, de_ier);
2494 POSTING_READ(DEIER);
2495 if (!HAS_PCH_NOP(dev)) {
2496 I915_WRITE(SDEIER, sde_ier);
2497 POSTING_READ(SDEIER);
2498 }
2499
2500 return ret;
2501 }
2502
2503 static irqreturn_t gen8_irq_handler(int irq, void *arg)
2504 {
2505 struct drm_device *dev = arg;
2506 struct drm_i915_private *dev_priv = dev->dev_private;
2507 u32 master_ctl;
2508 irqreturn_t ret = IRQ_NONE;
2509 uint32_t tmp = 0;
2510 enum pipe pipe;
2511
2512 master_ctl = I915_READ(GEN8_MASTER_IRQ);
2513 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2514 if (!master_ctl)
2515 return IRQ_NONE;
2516
2517 I915_WRITE(GEN8_MASTER_IRQ, 0);
2518 POSTING_READ(GEN8_MASTER_IRQ);
2519
2520 /* Find, clear, then process each source of interrupt */
2521
2522 ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);
2523
2524 if (master_ctl & GEN8_DE_MISC_IRQ) {
2525 tmp = I915_READ(GEN8_DE_MISC_IIR);
2526 if (tmp) {
2527 I915_WRITE(GEN8_DE_MISC_IIR, tmp);
2528 ret = IRQ_HANDLED;
2529 if (tmp & GEN8_DE_MISC_GSE)
2530 intel_opregion_asle_intr(dev);
2531 else
2532 DRM_ERROR("Unexpected DE Misc interrupt\n");
2533 }
2534 else
2535 DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2536 }
2537
2538 if (master_ctl & GEN8_DE_PORT_IRQ) {
2539 tmp = I915_READ(GEN8_DE_PORT_IIR);
2540 if (tmp) {
2541 I915_WRITE(GEN8_DE_PORT_IIR, tmp);
2542 ret = IRQ_HANDLED;
2543 if (tmp & GEN8_AUX_CHANNEL_A)
2544 dp_aux_irq_handler(dev);
2545 else
2546 DRM_ERROR("Unexpected DE Port interrupt\n");
2547 }
2548 else
2549 DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2550 }
2551
2552 for_each_pipe(pipe) {
2553 uint32_t pipe_iir;
2554
2555 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2556 continue;
2557
2558 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2559 if (pipe_iir) {
2560 ret = IRQ_HANDLED;
2561 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
2562 if (pipe_iir & GEN8_PIPE_VBLANK)
2563 intel_pipe_handle_vblank(dev, pipe);
2564
2565 if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) {
2566 intel_prepare_page_flip(dev, pipe);
2567 intel_finish_page_flip_plane(dev, pipe);
2568 }
2569
2570 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
2571 hsw_pipe_crc_irq_handler(dev, pipe);
2572
2573 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
2574 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
2575 false))
2576 DRM_ERROR("Pipe %c FIFO underrun\n",
2577 pipe_name(pipe));
2578 }
2579
2580 if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
2581 DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
2582 pipe_name(pipe),
2583 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
2584 }
2585 } else
2586 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2587 }
2588
2589 if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
2590 /*
2591 * FIXME(BDW): Assume for now that the new interrupt handling
2592 * scheme also closed the SDE interrupt handling race we've seen
2593 * on older pch-split platforms. But this needs testing.
2594 */
2595 u32 pch_iir = I915_READ(SDEIIR);
2596 if (pch_iir) {
2597 I915_WRITE(SDEIIR, pch_iir);
2598 ret = IRQ_HANDLED;
2599 cpt_irq_handler(dev, pch_iir);
2600 } else
2601 DRM_ERROR("The master control interrupt lied (SDE)!\n");
2602
2603 }
2604
2605 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2606 POSTING_READ(GEN8_MASTER_IRQ);
2607
2608 return ret;
2609 }
2610
2611 static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2612 bool reset_completed)
2613 {
2614 struct intel_engine_cs *ring;
2615 int i;
2616
2617 /*
2618 * Notify all waiters for GPU completion events that reset state has
2619 * been changed, and that they need to restart their wait after
2620 * checking for potential errors (and bail out to drop locks if there is
2621 * a gpu reset pending so that i915_error_work_func can acquire them).
2622 */
2623
2624 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2625 for_each_ring(ring, dev_priv, i)
2626 wake_up_all(&ring->irq_queue);
2627
2628 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2629 wake_up_all(&dev_priv->pending_flip_queue);
2630
2631 /*
2632 * Signal tasks blocked in i915_gem_wait_for_error that the pending
2633 * reset state is cleared.
2634 */
2635 if (reset_completed)
2636 wake_up_all(&dev_priv->gpu_error.reset_queue);
2637 }
2638
2639 /**
2640 * i915_error_work_func - do process context error handling work
2641 * @work: work struct
2642 *
2643 * Fire an error uevent so userspace can see that a hang or error
2644 * was detected.
2645 */
2646 static void i915_error_work_func(struct work_struct *work)
2647 {
2648 struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
2649 work);
2650 struct drm_i915_private *dev_priv =
2651 container_of(error, struct drm_i915_private, gpu_error);
2652 struct drm_device *dev = dev_priv->dev;
2653 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2654 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2655 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
2656 int ret;
2657
2658 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
2659
2660 /*
2661 * Note that there's only one work item which does gpu resets, so we
2662 * need not worry about concurrent gpu resets potentially incrementing
2663 * error->reset_counter twice. We only need to take care of another
2664 * racing irq/hangcheck declaring the gpu dead for a second time. A
2665 * quick check for that is good enough: schedule_work ensures the
2666 * correct ordering between hang detection and this work item, and since
2667 * the reset in-progress bit is only ever set by code outside of this
2668 * work we don't need to worry about any other races.
2669 */
2670 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
2671 DRM_DEBUG_DRIVER("resetting chip\n");
2672 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
2673 reset_event);
2674
2675 /*
2676 * In most cases it's guaranteed that we get here with an RPM
2677 * reference held, for example because there is a pending GPU
2678 * request that won't finish until the reset is done. This
2679 * isn't the case at least when we get here by doing a
2680 * simulated reset via debugs, so get an RPM reference.
2681 */
2682 intel_runtime_pm_get(dev_priv);
2683 /*
2684 * All state reset _must_ be completed before we update the
2685 * reset counter, for otherwise waiters might miss the reset
2686 * pending state and not properly drop locks, resulting in
2687 * deadlocks with the reset work.
2688 */
2689 ret = i915_reset(dev);
2690
2691 intel_display_handle_reset(dev);
2692
2693 intel_runtime_pm_put(dev_priv);
2694
2695 if (ret == 0) {
2696 /*
2697 * After all the gem state is reset, increment the reset
2698 * counter and wake up everyone waiting for the reset to
2699 * complete.
2700 *
2701 * Since unlock operations are a one-sided barrier only,
2702 * we need to insert a barrier here to order any seqno
2703 * updates before
2704 * the counter increment.
2705 */
2706 smp_mb__before_atomic();
2707 atomic_inc(&dev_priv->gpu_error.reset_counter);
2708
2709 kobject_uevent_env(&dev->primary->kdev->kobj,
2710 KOBJ_CHANGE, reset_done_event);
2711 } else {
2712 atomic_set_mask(I915_WEDGED, &error->reset_counter);
2713 }
2714
2715 /*
2716 * Note: The wake_up also serves as a memory barrier so that
2717 * waiters see the update value of the reset counter atomic_t.
2718 */
2719 i915_error_wake_up(dev_priv, true);
2720 }
2721 }
2722
2723 static void i915_report_and_clear_eir(struct drm_device *dev)
2724 {
2725 struct drm_i915_private *dev_priv = dev->dev_private;
2726 uint32_t instdone[I915_NUM_INSTDONE_REG];
2727 u32 eir = I915_READ(EIR);
2728 int pipe, i;
2729
2730 if (!eir)
2731 return;
2732
2733 pr_err("render error detected, EIR: 0x%08x\n", eir);
2734
2735 i915_get_extra_instdone(dev, instdone);
2736
2737 if (IS_G4X(dev)) {
2738 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2739 u32 ipeir = I915_READ(IPEIR_I965);
2740
2741 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2742 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2743 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2744 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2745 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
2746 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2747 I915_WRITE(IPEIR_I965, ipeir);
2748 POSTING_READ(IPEIR_I965);
2749 }
2750 if (eir & GM45_ERROR_PAGE_TABLE) {
2751 u32 pgtbl_err = I915_READ(PGTBL_ER);
2752 pr_err("page table error\n");
2753 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
2754 I915_WRITE(PGTBL_ER, pgtbl_err);
2755 POSTING_READ(PGTBL_ER);
2756 }
2757 }
2758
2759 if (!IS_GEN2(dev)) {
2760 if (eir & I915_ERROR_PAGE_TABLE) {
2761 u32 pgtbl_err = I915_READ(PGTBL_ER);
2762 pr_err("page table error\n");
2763 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
2764 I915_WRITE(PGTBL_ER, pgtbl_err);
2765 POSTING_READ(PGTBL_ER);
2766 }
2767 }
2768
2769 if (eir & I915_ERROR_MEMORY_REFRESH) {
2770 pr_err("memory refresh error:\n");
2771 for_each_pipe(pipe)
2772 pr_err("pipe %c stat: 0x%08x\n",
2773 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
2774 /* pipestat has already been acked */
2775 }
2776 if (eir & I915_ERROR_INSTRUCTION) {
2777 pr_err("instruction error\n");
2778 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
2779 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2780 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2781 if (INTEL_INFO(dev)->gen < 4) {
2782 u32 ipeir = I915_READ(IPEIR);
2783
2784 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
2785 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
2786 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
2787 I915_WRITE(IPEIR, ipeir);
2788 POSTING_READ(IPEIR);
2789 } else {
2790 u32 ipeir = I915_READ(IPEIR_I965);
2791
2792 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2793 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2794 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
2795 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2796 I915_WRITE(IPEIR_I965, ipeir);
2797 POSTING_READ(IPEIR_I965);
2798 }
2799 }
2800
2801 I915_WRITE(EIR, eir);
2802 POSTING_READ(EIR);
2803 eir = I915_READ(EIR);
2804 if (eir) {
2805 /*
2806 * some errors might have become stuck,
2807 * mask them.
2808 */
2809 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2810 I915_WRITE(EMR, I915_READ(EMR) | eir);
2811 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2812 }
2813 }
2814
2815 /**
2816 * i915_handle_error - handle an error interrupt
2817 * @dev: drm device
2818 *
2819 * Do some basic checking of regsiter state at error interrupt time and
2820 * dump it to the syslog. Also call i915_capture_error_state() to make
2821 * sure we get a record and make it available in debugfs. Fire a uevent
2822 * so userspace knows something bad happened (should trigger collection
2823 * of a ring dump etc.).
2824 */
2825 void i915_handle_error(struct drm_device *dev, bool wedged,
2826 const char *fmt, ...)
2827 {
2828 struct drm_i915_private *dev_priv = dev->dev_private;
2829 va_list args;
2830 char error_msg[80];
2831
2832 va_start(args, fmt);
2833 vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2834 va_end(args);
2835
2836 i915_capture_error_state(dev, wedged, error_msg);
2837 i915_report_and_clear_eir(dev);
2838
2839 if (wedged) {
2840 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
2841 &dev_priv->gpu_error.reset_counter);
2842
2843 /*
2844 * Wakeup waiting processes so that the reset work function
2845 * i915_error_work_func doesn't deadlock trying to grab various
2846 * locks. By bumping the reset counter first, the woken
2847 * processes will see a reset in progress and back off,
2848 * releasing their locks and then wait for the reset completion.
2849 * We must do this for _all_ gpu waiters that might hold locks
2850 * that the reset work needs to acquire.
2851 *
2852 * Note: The wake_up serves as the required memory barrier to
2853 * ensure that the waiters see the updated value of the reset
2854 * counter atomic_t.
2855 */
2856 i915_error_wake_up(dev_priv, false);
2857 }
2858
2859 /*
2860 * Our reset work can grab modeset locks (since it needs to reset the
2861 * state of outstanding pagelips). Hence it must not be run on our own
2862 * dev-priv->wq work queue for otherwise the flush_work in the pageflip
2863 * code will deadlock.
2864 */
2865 schedule_work(&dev_priv->gpu_error.work);
2866 }
2867
2868 static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
2869 {
2870 struct drm_i915_private *dev_priv = dev->dev_private;
2871 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
2872 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2873 struct drm_i915_gem_object *obj;
2874 struct intel_unpin_work *work;
2875 unsigned long flags;
2876 bool stall_detected;
2877
2878 /* Ignore early vblank irqs */
2879 if (intel_crtc == NULL)
2880 return;
2881
2882 spin_lock_irqsave(&dev->event_lock, flags);
2883 work = intel_crtc->unpin_work;
2884
2885 if (work == NULL ||
2886 atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
2887 !work->enable_stall_check) {
2888 /* Either the pending flip IRQ arrived, or we're too early. Don't check */
2889 spin_unlock_irqrestore(&dev->event_lock, flags);
2890 return;
2891 }
2892
2893 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
2894 obj = work->pending_flip_obj;
2895 if (INTEL_INFO(dev)->gen >= 4) {
2896 int dspsurf = DSPSURF(intel_crtc->plane);
2897 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
2898 i915_gem_obj_ggtt_offset(obj);
2899 } else {
2900 int dspaddr = DSPADDR(intel_crtc->plane);
2901 stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) +
2902 crtc->y * crtc->primary->fb->pitches[0] +
2903 crtc->x * crtc->primary->fb->bits_per_pixel/8);
2904 }
2905
2906 spin_unlock_irqrestore(&dev->event_lock, flags);
2907
2908 if (stall_detected) {
2909 DRM_DEBUG_DRIVER("Pageflip stall detected\n");
2910 intel_prepare_page_flip(dev, intel_crtc->plane);
2911 }
2912 }
2913
2914 /* Called from drm generic code, passed 'crtc' which
2915 * we use as a pipe index
2916 */
2917 static int i915_enable_vblank(struct drm_device *dev, int pipe)
2918 {
2919 struct drm_i915_private *dev_priv = dev->dev_private;
2920 unsigned long irqflags;
2921
2922 if (!i915_pipe_enabled(dev, pipe))
2923 return -EINVAL;
2924
2925 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2926 if (INTEL_INFO(dev)->gen >= 4)
2927 i915_enable_pipestat(dev_priv, pipe,
2928 PIPE_START_VBLANK_INTERRUPT_STATUS);
2929 else
2930 i915_enable_pipestat(dev_priv, pipe,
2931 PIPE_VBLANK_INTERRUPT_STATUS);
2932 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2933
2934 return 0;
2935 }
2936
2937 static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
2938 {
2939 struct drm_i915_private *dev_priv = dev->dev_private;
2940 unsigned long irqflags;
2941 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2942 DE_PIPE_VBLANK(pipe);
2943
2944 if (!i915_pipe_enabled(dev, pipe))
2945 return -EINVAL;
2946
2947 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2948 ironlake_enable_display_irq(dev_priv, bit);
2949 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2950
2951 return 0;
2952 }
2953
2954 static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
2955 {
2956 struct drm_i915_private *dev_priv = dev->dev_private;
2957 unsigned long irqflags;
2958
2959 if (!i915_pipe_enabled(dev, pipe))
2960 return -EINVAL;
2961
2962 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2963 i915_enable_pipestat(dev_priv, pipe,
2964 PIPE_START_VBLANK_INTERRUPT_STATUS);
2965 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2966
2967 return 0;
2968 }
2969
2970 static int gen8_enable_vblank(struct drm_device *dev, int pipe)
2971 {
2972 struct drm_i915_private *dev_priv = dev->dev_private;
2973 unsigned long irqflags;
2974
2975 if (!i915_pipe_enabled(dev, pipe))
2976 return -EINVAL;
2977
2978 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2979 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
2980 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2981 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2982 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2983 return 0;
2984 }
2985
2986 /* Called from drm generic code, passed 'crtc' which
2987 * we use as a pipe index
2988 */
2989 static void i915_disable_vblank(struct drm_device *dev, int pipe)
2990 {
2991 struct drm_i915_private *dev_priv = dev->dev_private;
2992 unsigned long irqflags;
2993
2994 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2995 i915_disable_pipestat(dev_priv, pipe,
2996 PIPE_VBLANK_INTERRUPT_STATUS |
2997 PIPE_START_VBLANK_INTERRUPT_STATUS);
2998 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2999 }
3000
3001 static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
3002 {
3003 struct drm_i915_private *dev_priv = dev->dev_private;
3004 unsigned long irqflags;
3005 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
3006 DE_PIPE_VBLANK(pipe);
3007
3008 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3009 ironlake_disable_display_irq(dev_priv, bit);
3010 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3011 }
3012
3013 static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
3014 {
3015 struct drm_i915_private *dev_priv = dev->dev_private;
3016 unsigned long irqflags;
3017
3018 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3019 i915_disable_pipestat(dev_priv, pipe,
3020 PIPE_START_VBLANK_INTERRUPT_STATUS);
3021 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3022 }
3023
3024 static void gen8_disable_vblank(struct drm_device *dev, int pipe)
3025 {
3026 struct drm_i915_private *dev_priv = dev->dev_private;
3027 unsigned long irqflags;
3028
3029 if (!i915_pipe_enabled(dev, pipe))
3030 return;
3031
3032 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3033 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
3034 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
3035 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
3036 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3037 }
3038
3039 static u32
3040 ring_last_seqno(struct intel_engine_cs *ring)
3041 {
3042 return list_entry(ring->request_list.prev,
3043 struct drm_i915_gem_request, list)->seqno;
3044 }
3045
3046 static bool
3047 ring_idle(struct intel_engine_cs *ring, u32 seqno)
3048 {
3049 return (list_empty(&ring->request_list) ||
3050 i915_seqno_passed(seqno, ring_last_seqno(ring)));
3051 }
3052
3053 static bool
3054 ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
3055 {
3056 if (INTEL_INFO(dev)->gen >= 8) {
3057 return (ipehr >> 23) == 0x1c;
3058 } else {
3059 ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
3060 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
3061 MI_SEMAPHORE_REGISTER);
3062 }
3063 }
3064
3065 static struct intel_engine_cs *
3066 semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
3067 {
3068 struct drm_i915_private *dev_priv = ring->dev->dev_private;
3069 struct intel_engine_cs *signaller;
3070 int i;
3071
3072 if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
3073 for_each_ring(signaller, dev_priv, i) {
3074 if (ring == signaller)
3075 continue;
3076
3077 if (offset == signaller->semaphore.signal_ggtt[ring->id])
3078 return signaller;
3079 }
3080 } else {
3081 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
3082
3083 for_each_ring(signaller, dev_priv, i) {
3084 if(ring == signaller)
3085 continue;
3086
3087 if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
3088 return signaller;
3089 }
3090 }
3091
3092 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
3093 ring->id, ipehr, offset);
3094
3095 return NULL;
3096 }
3097
3098 static struct intel_engine_cs *
3099 semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
3100 {
3101 struct drm_i915_private *dev_priv = ring->dev->dev_private;
3102 u32 cmd, ipehr, head;
3103 u64 offset = 0;
3104 int i, backwards;
3105
3106 ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
3107 if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
3108 return NULL;
3109
3110 /*
3111 * HEAD is likely pointing to the dword after the actual command,
3112 * so scan backwards until we find the MBOX. But limit it to just 3
3113 * or 4 dwords depending on the semaphore wait command size.
3114 * Note that we don't care about ACTHD here since that might
3115 * point at at batch, and semaphores are always emitted into the
3116 * ringbuffer itself.
3117 */
3118 head = I915_READ_HEAD(ring) & HEAD_ADDR;
3119 backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
3120
3121 for (i = backwards; i; --i) {
3122 /*
3123 * Be paranoid and presume the hw has gone off into the wild -
3124 * our ring is smaller than what the hardware (and hence
3125 * HEAD_ADDR) allows. Also handles wrap-around.
3126 */
3127 head &= ring->buffer->size - 1;
3128
3129 /* This here seems to blow up */
3130 cmd = ioread32(ring->buffer->virtual_start + head);
3131 if (cmd == ipehr)
3132 break;
3133
3134 head -= 4;
3135 }
3136
3137 if (!i)
3138 return NULL;
3139
3140 *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
3141 if (INTEL_INFO(ring->dev)->gen >= 8) {
3142 offset = ioread32(ring->buffer->virtual_start + head + 12);
3143 offset <<= 32;
3144 offset = ioread32(ring->buffer->virtual_start + head + 8);
3145 }
3146 return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
3147 }
3148
3149 static int semaphore_passed(struct intel_engine_cs *ring)
3150 {
3151 struct drm_i915_private *dev_priv = ring->dev->dev_private;
3152 struct intel_engine_cs *signaller;
3153 u32 seqno;
3154
3155 ring->hangcheck.deadlock++;
3156
3157 signaller = semaphore_waits_for(ring, &seqno);
3158 if (signaller == NULL)
3159 return -1;
3160
3161 /* Prevent pathological recursion due to driver bugs */
3162 if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
3163 return -1;
3164
3165 if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
3166 return 1;
3167
3168 /* cursory check for an unkickable deadlock */
3169 if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
3170 semaphore_passed(signaller) < 0)
3171 return -1;
3172
3173 return 0;
3174 }
3175
3176 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
3177 {
3178 struct intel_engine_cs *ring;
3179 int i;
3180
3181 for_each_ring(ring, dev_priv, i)
3182 ring->hangcheck.deadlock = 0;
3183 }
3184
3185 static enum intel_ring_hangcheck_action
3186 ring_stuck(struct intel_engine_cs *ring, u64 acthd)
3187 {
3188 struct drm_device *dev = ring->dev;
3189 struct drm_i915_private *dev_priv = dev->dev_private;
3190 u32 tmp;
3191
3192 if (ring->hangcheck.acthd != acthd)
3193 return HANGCHECK_ACTIVE;
3194
3195 if (IS_GEN2(dev))
3196 return HANGCHECK_HUNG;
3197
3198 /* Is the chip hanging on a WAIT_FOR_EVENT?
3199 * If so we can simply poke the RB_WAIT bit
3200 * and break the hang. This should work on
3201 * all but the second generation chipsets.
3202 */
3203 tmp = I915_READ_CTL(ring);
3204 if (tmp & RING_WAIT) {
3205 i915_handle_error(dev, false,
3206 "Kicking stuck wait on %s",
3207 ring->name);
3208 I915_WRITE_CTL(ring, tmp);
3209 return HANGCHECK_KICK;
3210 }
3211
3212 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
3213 switch (semaphore_passed(ring)) {
3214 default:
3215 return HANGCHECK_HUNG;
3216 case 1:
3217 i915_handle_error(dev, false,
3218 "Kicking stuck semaphore on %s",
3219 ring->name);
3220 I915_WRITE_CTL(ring, tmp);
3221 return HANGCHECK_KICK;
3222 case 0:
3223 return HANGCHECK_WAIT;
3224 }
3225 }
3226
3227 return HANGCHECK_HUNG;
3228 }
3229
3230 /**
3231 * This is called when the chip hasn't reported back with completed
3232 * batchbuffers in a long time. We keep track per ring seqno progress and
3233 * if there are no progress, hangcheck score for that ring is increased.
3234 * Further, acthd is inspected to see if the ring is stuck. On stuck case
3235 * we kick the ring. If we see no progress on three subsequent calls
3236 * we assume chip is wedged and try to fix it by resetting the chip.
3237 */
3238 static void i915_hangcheck_elapsed(unsigned long data)
3239 {
3240 struct drm_device *dev = (struct drm_device *)data;
3241 struct drm_i915_private *dev_priv = dev->dev_private;
3242 struct intel_engine_cs *ring;
3243 int i;
3244 int busy_count = 0, rings_hung = 0;
3245 bool stuck[I915_NUM_RINGS] = { 0 };
3246 #define BUSY 1
3247 #define KICK 5
3248 #define HUNG 20
3249
3250 if (!i915.enable_hangcheck)
3251 return;
3252
3253 for_each_ring(ring, dev_priv, i) {
3254 u64 acthd;
3255 u32 seqno;
3256 bool busy = true;
3257
3258 semaphore_clear_deadlocks(dev_priv);
3259
3260 seqno = ring->get_seqno(ring, false);
3261 acthd = intel_ring_get_active_head(ring);
3262
3263 if (ring->hangcheck.seqno == seqno) {
3264 if (ring_idle(ring, seqno)) {
3265 ring->hangcheck.action = HANGCHECK_IDLE;
3266
3267 if (waitqueue_active(&ring->irq_queue)) {
3268 /* Issue a wake-up to catch stuck h/w. */
3269 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
3270 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
3271 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
3272 ring->name);
3273 else
3274 DRM_INFO("Fake missed irq on %s\n",
3275 ring->name);
3276 wake_up_all(&ring->irq_queue);
3277 }
3278 /* Safeguard against driver failure */
3279 ring->hangcheck.score += BUSY;
3280 } else
3281 busy = false;
3282 } else {
3283 /* We always increment the hangcheck score
3284 * if the ring is busy and still processing
3285 * the same request, so that no single request
3286 * can run indefinitely (such as a chain of
3287 * batches). The only time we do not increment
3288 * the hangcheck score on this ring, if this
3289 * ring is in a legitimate wait for another
3290 * ring. In that case the waiting ring is a
3291 * victim and we want to be sure we catch the
3292 * right culprit. Then every time we do kick
3293 * the ring, add a small increment to the
3294 * score so that we can catch a batch that is
3295 * being repeatedly kicked and so responsible
3296 * for stalling the machine.
3297 */
3298 ring->hangcheck.action = ring_stuck(ring,
3299 acthd);
3300
3301 switch (ring->hangcheck.action) {
3302 case HANGCHECK_IDLE:
3303 case HANGCHECK_WAIT:
3304 break;
3305 case HANGCHECK_ACTIVE:
3306 ring->hangcheck.score += BUSY;
3307 break;
3308 case HANGCHECK_KICK:
3309 ring->hangcheck.score += KICK;
3310 break;
3311 case HANGCHECK_HUNG:
3312 ring->hangcheck.score += HUNG;
3313 stuck[i] = true;
3314 break;
3315 }
3316 }
3317 } else {
3318 ring->hangcheck.action = HANGCHECK_ACTIVE;
3319
3320 /* Gradually reduce the count so that we catch DoS
3321 * attempts across multiple batches.
3322 */
3323 if (ring->hangcheck.score > 0)
3324 ring->hangcheck.score--;
3325 }
3326
3327 ring->hangcheck.seqno = seqno;
3328 ring->hangcheck.acthd = acthd;
3329 busy_count += busy;
3330 }
3331
3332 for_each_ring(ring, dev_priv, i) {
3333 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
3334 DRM_INFO("%s on %s\n",
3335 stuck[i] ? "stuck" : "no progress",
3336 ring->name);
3337 rings_hung++;
3338 }
3339 }
3340
3341 if (rings_hung)
3342 return i915_handle_error(dev, true, "Ring hung");
3343
3344 if (busy_count)
3345 /* Reset timer case chip hangs without another request
3346 * being added */
3347 i915_queue_hangcheck(dev);
3348 }
3349
3350 void i915_queue_hangcheck(struct drm_device *dev)
3351 {
3352 struct drm_i915_private *dev_priv = dev->dev_private;
3353 if (!i915.enable_hangcheck)
3354 return;
3355
3356 mod_timer(&dev_priv->gpu_error.hangcheck_timer,
3357 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
3358 }
3359
3360 static void ibx_irq_reset(struct drm_device *dev)
3361 {
3362 struct drm_i915_private *dev_priv = dev->dev_private;
3363
3364 if (HAS_PCH_NOP(dev))
3365 return;
3366
3367 GEN5_IRQ_RESET(SDE);
3368
3369 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
3370 I915_WRITE(SERR_INT, 0xffffffff);
3371 }
3372
3373 /*
3374 * SDEIER is also touched by the interrupt handler to work around missed PCH
3375 * interrupts. Hence we can't update it after the interrupt handler is enabled -
3376 * instead we unconditionally enable all PCH interrupt sources here, but then
3377 * only unmask them as needed with SDEIMR.
3378 *
3379 * This function needs to be called before interrupts are enabled.
3380 */
3381 static void ibx_irq_pre_postinstall(struct drm_device *dev)
3382 {
3383 struct drm_i915_private *dev_priv = dev->dev_private;
3384
3385 if (HAS_PCH_NOP(dev))
3386 return;
3387
3388 WARN_ON(I915_READ(SDEIER) != 0);
3389 I915_WRITE(SDEIER, 0xffffffff);
3390 POSTING_READ(SDEIER);
3391 }
3392
3393 static void gen5_gt_irq_reset(struct drm_device *dev)
3394 {
3395 struct drm_i915_private *dev_priv = dev->dev_private;
3396
3397 GEN5_IRQ_RESET(GT);
3398 if (INTEL_INFO(dev)->gen >= 6)
3399 GEN5_IRQ_RESET(GEN6_PM);
3400 }
3401
3402 /* drm_dma.h hooks
3403 */
3404 static void ironlake_irq_reset(struct drm_device *dev)
3405 {
3406 struct drm_i915_private *dev_priv = dev->dev_private;
3407
3408 I915_WRITE(HWSTAM, 0xffffffff);
3409
3410 GEN5_IRQ_RESET(DE);
3411 if (IS_GEN7(dev))
3412 I915_WRITE(GEN7_ERR_INT, 0xffffffff);
3413
3414 gen5_gt_irq_reset(dev);
3415
3416 ibx_irq_reset(dev);
3417 }
3418
3419 static void valleyview_irq_preinstall(struct drm_device *dev)
3420 {
3421 struct drm_i915_private *dev_priv = dev->dev_private;
3422 int pipe;
3423
3424 /* VLV magic */
3425 I915_WRITE(VLV_IMR, 0);
3426 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
3427 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
3428 I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
3429
3430 /* and GT */
3431 I915_WRITE(GTIIR, I915_READ(GTIIR));
3432 I915_WRITE(GTIIR, I915_READ(GTIIR));
3433
3434 gen5_gt_irq_reset(dev);
3435
3436 I915_WRITE(DPINVGTT, 0xff);
3437
3438 I915_WRITE(PORT_HOTPLUG_EN, 0);
3439 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3440 for_each_pipe(pipe)
3441 I915_WRITE(PIPESTAT(pipe), 0xffff);
3442 I915_WRITE(VLV_IIR, 0xffffffff);
3443 I915_WRITE(VLV_IMR, 0xffffffff);
3444 I915_WRITE(VLV_IER, 0x0);
3445 POSTING_READ(VLV_IER);
3446 }
3447
3448 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3449 {
3450 GEN8_IRQ_RESET_NDX(GT, 0);
3451 GEN8_IRQ_RESET_NDX(GT, 1);
3452 GEN8_IRQ_RESET_NDX(GT, 2);
3453 GEN8_IRQ_RESET_NDX(GT, 3);
3454 }
3455
3456 static void gen8_irq_reset(struct drm_device *dev)
3457 {
3458 struct drm_i915_private *dev_priv = dev->dev_private;
3459 int pipe;
3460
3461 I915_WRITE(GEN8_MASTER_IRQ, 0);
3462 POSTING_READ(GEN8_MASTER_IRQ);
3463
3464 gen8_gt_irq_reset(dev_priv);
3465
3466 for_each_pipe(pipe)
3467 if (intel_display_power_enabled(dev_priv,
3468 POWER_DOMAIN_PIPE(pipe)))
3469 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3470
3471 GEN5_IRQ_RESET(GEN8_DE_PORT_);
3472 GEN5_IRQ_RESET(GEN8_DE_MISC_);
3473 GEN5_IRQ_RESET(GEN8_PCU_);
3474
3475 ibx_irq_reset(dev);
3476 }
3477
3478 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
3479 {
3480 unsigned long irqflags;
3481
3482 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3483 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
3484 ~dev_priv->de_irq_mask[PIPE_B]);
3485 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
3486 ~dev_priv->de_irq_mask[PIPE_C]);
3487 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3488 }
3489
3490 static void cherryview_irq_preinstall(struct drm_device *dev)
3491 {
3492 struct drm_i915_private *dev_priv = dev->dev_private;
3493 int pipe;
3494
3495 I915_WRITE(GEN8_MASTER_IRQ, 0);
3496 POSTING_READ(GEN8_MASTER_IRQ);
3497
3498 gen8_gt_irq_reset(dev_priv);
3499
3500 GEN5_IRQ_RESET(GEN8_PCU_);
3501
3502 POSTING_READ(GEN8_PCU_IIR);
3503
3504 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3505
3506 I915_WRITE(PORT_HOTPLUG_EN, 0);
3507 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3508
3509 for_each_pipe(pipe)
3510 I915_WRITE(PIPESTAT(pipe), 0xffff);
3511
3512 I915_WRITE(VLV_IMR, 0xffffffff);
3513 I915_WRITE(VLV_IER, 0x0);
3514 I915_WRITE(VLV_IIR, 0xffffffff);
3515 POSTING_READ(VLV_IIR);
3516 }
3517
3518 static void ibx_hpd_irq_setup(struct drm_device *dev)
3519 {
3520 struct drm_i915_private *dev_priv = dev->dev_private;
3521 struct drm_mode_config *mode_config = &dev->mode_config;
3522 struct intel_encoder *intel_encoder;
3523 u32 hotplug_irqs, hotplug, enabled_irqs = 0;
3524
3525 if (HAS_PCH_IBX(dev)) {
3526 hotplug_irqs = SDE_HOTPLUG_MASK;
3527 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
3528 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3529 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
3530 } else {
3531 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3532 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
3533 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3534 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
3535 }
3536
3537 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3538
3539 /*
3540 * Enable digital hotplug on the PCH, and configure the DP short pulse
3541 * duration to 2ms (which is the minimum in the Display Port spec)
3542 *
3543 * This register is the same on all known PCH chips.
3544 */
3545 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3546 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3547 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3548 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3549 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3550 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3551 }
3552
3553 static void ibx_irq_postinstall(struct drm_device *dev)
3554 {
3555 struct drm_i915_private *dev_priv = dev->dev_private;
3556 u32 mask;
3557
3558 if (HAS_PCH_NOP(dev))
3559 return;
3560
3561 if (HAS_PCH_IBX(dev))
3562 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3563 else
3564 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3565
3566 GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
3567 I915_WRITE(SDEIMR, ~mask);
3568 }
3569
3570 static void gen5_gt_irq_postinstall(struct drm_device *dev)
3571 {
3572 struct drm_i915_private *dev_priv = dev->dev_private;
3573 u32 pm_irqs, gt_irqs;
3574
3575 pm_irqs = gt_irqs = 0;
3576
3577 dev_priv->gt_irq_mask = ~0;
3578 if (HAS_L3_DPF(dev)) {
3579 /* L3 parity interrupt is always unmasked. */
3580 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
3581 gt_irqs |= GT_PARITY_ERROR(dev);
3582 }
3583
3584 gt_irqs |= GT_RENDER_USER_INTERRUPT;
3585 if (IS_GEN5(dev)) {
3586 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
3587 ILK_BSD_USER_INTERRUPT;
3588 } else {
3589 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3590 }
3591
3592 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
3593
3594 if (INTEL_INFO(dev)->gen >= 6) {
3595 pm_irqs |= dev_priv->pm_rps_events;
3596
3597 if (HAS_VEBOX(dev))
3598 pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3599
3600 dev_priv->pm_irq_mask = 0xffffffff;
3601 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
3602 }
3603 }
3604
3605 static int ironlake_irq_postinstall(struct drm_device *dev)
3606 {
3607 unsigned long irqflags;
3608 struct drm_i915_private *dev_priv = dev->dev_private;
3609 u32 display_mask, extra_mask;
3610
3611 if (INTEL_INFO(dev)->gen >= 7) {
3612 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3613 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3614 DE_PLANEB_FLIP_DONE_IVB |
3615 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
3616 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3617 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
3618 } else {
3619 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3620 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
3621 DE_AUX_CHANNEL_A |
3622 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3623 DE_POISON);
3624 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3625 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
3626 }
3627
3628 dev_priv->irq_mask = ~display_mask;
3629
3630 I915_WRITE(HWSTAM, 0xeffe);
3631
3632 ibx_irq_pre_postinstall(dev);
3633
3634 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3635
3636 gen5_gt_irq_postinstall(dev);
3637
3638 ibx_irq_postinstall(dev);
3639
3640 if (IS_IRONLAKE_M(dev)) {
3641 /* Enable PCU event interrupts
3642 *
3643 * spinlocking not required here for correctness since interrupt
3644 * setup is guaranteed to run in single-threaded context. But we
3645 * need it to make the assert_spin_locked happy. */
3646 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3647 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
3648 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3649 }
3650
3651 return 0;
3652 }
3653
3654 static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
3655 {
3656 u32 pipestat_mask;
3657 u32 iir_mask;
3658
3659 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3660 PIPE_FIFO_UNDERRUN_STATUS;
3661
3662 I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
3663 I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
3664 POSTING_READ(PIPESTAT(PIPE_A));
3665
3666 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3667 PIPE_CRC_DONE_INTERRUPT_STATUS;
3668
3669 i915_enable_pipestat(dev_priv, PIPE_A, pipestat_mask |
3670 PIPE_GMBUS_INTERRUPT_STATUS);
3671 i915_enable_pipestat(dev_priv, PIPE_B, pipestat_mask);
3672
3673 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3674 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3675 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3676 dev_priv->irq_mask &= ~iir_mask;
3677
3678 I915_WRITE(VLV_IIR, iir_mask);
3679 I915_WRITE(VLV_IIR, iir_mask);
3680 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3681 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3682 POSTING_READ(VLV_IER);
3683 }
3684
3685 static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
3686 {
3687 u32 pipestat_mask;
3688 u32 iir_mask;
3689
3690 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3691 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3692 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3693
3694 dev_priv->irq_mask |= iir_mask;
3695 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3696 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3697 I915_WRITE(VLV_IIR, iir_mask);
3698 I915_WRITE(VLV_IIR, iir_mask);
3699 POSTING_READ(VLV_IIR);
3700
3701 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3702 PIPE_CRC_DONE_INTERRUPT_STATUS;
3703
3704 i915_disable_pipestat(dev_priv, PIPE_A, pipestat_mask |
3705 PIPE_GMBUS_INTERRUPT_STATUS);
3706 i915_disable_pipestat(dev_priv, PIPE_B, pipestat_mask);
3707
3708 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3709 PIPE_FIFO_UNDERRUN_STATUS;
3710 I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
3711 I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
3712 POSTING_READ(PIPESTAT(PIPE_A));
3713 }
3714
3715 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3716 {
3717 assert_spin_locked(&dev_priv->irq_lock);
3718
3719 if (dev_priv->display_irqs_enabled)
3720 return;
3721
3722 dev_priv->display_irqs_enabled = true;
3723
3724 if (dev_priv->dev->irq_enabled)
3725 valleyview_display_irqs_install(dev_priv);
3726 }
3727
3728 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3729 {
3730 assert_spin_locked(&dev_priv->irq_lock);
3731
3732 if (!dev_priv->display_irqs_enabled)
3733 return;
3734
3735 dev_priv->display_irqs_enabled = false;
3736
3737 if (dev_priv->dev->irq_enabled)
3738 valleyview_display_irqs_uninstall(dev_priv);
3739 }
3740
3741 static int valleyview_irq_postinstall(struct drm_device *dev)
3742 {
3743 struct drm_i915_private *dev_priv = dev->dev_private;
3744 unsigned long irqflags;
3745
3746 dev_priv->irq_mask = ~0;
3747
3748 I915_WRITE(PORT_HOTPLUG_EN, 0);
3749 POSTING_READ(PORT_HOTPLUG_EN);
3750
3751 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3752 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3753 I915_WRITE(VLV_IIR, 0xffffffff);
3754 POSTING_READ(VLV_IER);
3755
3756 /* Interrupt setup is already guaranteed to be single-threaded, this is
3757 * just to make the assert_spin_locked check happy. */
3758 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3759 if (dev_priv->display_irqs_enabled)
3760 valleyview_display_irqs_install(dev_priv);
3761 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3762
3763 I915_WRITE(VLV_IIR, 0xffffffff);
3764 I915_WRITE(VLV_IIR, 0xffffffff);
3765
3766 gen5_gt_irq_postinstall(dev);
3767
3768 /* ack & enable invalid PTE error interrupts */
3769 #if 0 /* FIXME: add support to irq handler for checking these bits */
3770 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3771 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
3772 #endif
3773
3774 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3775
3776 return 0;
3777 }
3778
3779 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3780 {
3781 int i;
3782
3783 /* These are interrupts we'll toggle with the ring mask register */
3784 uint32_t gt_interrupts[] = {
3785 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3786 GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
3787 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3788 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3789 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3790 0,
3791 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3792 };
3793
3794 for (i = 0; i < ARRAY_SIZE(gt_interrupts); i++)
3795 GEN8_IRQ_INIT_NDX(GT, i, ~gt_interrupts[i], gt_interrupts[i]);
3796
3797 dev_priv->pm_irq_mask = 0xffffffff;
3798 }
3799
3800 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3801 {
3802 struct drm_device *dev = dev_priv->dev;
3803 uint32_t de_pipe_masked = GEN8_PIPE_PRIMARY_FLIP_DONE |
3804 GEN8_PIPE_CDCLK_CRC_DONE |
3805 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3806 uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3807 GEN8_PIPE_FIFO_UNDERRUN;
3808 int pipe;
3809 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3810 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3811 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3812
3813 for_each_pipe(pipe)
3814 if (intel_display_power_enabled(dev_priv,
3815 POWER_DOMAIN_PIPE(pipe)))
3816 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3817 dev_priv->de_irq_mask[pipe],
3818 de_pipe_enables);
3819
3820 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A);
3821 }
3822
3823 static int gen8_irq_postinstall(struct drm_device *dev)
3824 {
3825 struct drm_i915_private *dev_priv = dev->dev_private;
3826
3827 ibx_irq_pre_postinstall(dev);
3828
3829 gen8_gt_irq_postinstall(dev_priv);
3830 gen8_de_irq_postinstall(dev_priv);
3831
3832 ibx_irq_postinstall(dev);
3833
3834 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
3835 POSTING_READ(GEN8_MASTER_IRQ);
3836
3837 return 0;
3838 }
3839
3840 static int cherryview_irq_postinstall(struct drm_device *dev)
3841 {
3842 struct drm_i915_private *dev_priv = dev->dev_private;
3843 u32 enable_mask = I915_DISPLAY_PORT_INTERRUPT |
3844 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3845 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3846 I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3847 u32 pipestat_enable = PLANE_FLIP_DONE_INT_STATUS_VLV |
3848 PIPE_CRC_DONE_INTERRUPT_STATUS;
3849 unsigned long irqflags;
3850 int pipe;
3851
3852 /*
3853 * Leave vblank interrupts masked initially. enable/disable will
3854 * toggle them based on usage.
3855 */
3856 dev_priv->irq_mask = ~enable_mask;
3857
3858 for_each_pipe(pipe)
3859 I915_WRITE(PIPESTAT(pipe), 0xffff);
3860
3861 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3862 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3863 for_each_pipe(pipe)
3864 i915_enable_pipestat(dev_priv, pipe, pipestat_enable);
3865 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3866
3867 I915_WRITE(VLV_IIR, 0xffffffff);
3868 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3869 I915_WRITE(VLV_IER, enable_mask);
3870
3871 gen8_gt_irq_postinstall(dev_priv);
3872
3873 I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
3874 POSTING_READ(GEN8_MASTER_IRQ);
3875
3876 return 0;
3877 }
3878
3879 static void gen8_irq_uninstall(struct drm_device *dev)
3880 {
3881 struct drm_i915_private *dev_priv = dev->dev_private;
3882
3883 if (!dev_priv)
3884 return;
3885
3886 intel_hpd_irq_uninstall(dev_priv);
3887
3888 gen8_irq_reset(dev);
3889 }
3890
3891 static void valleyview_irq_uninstall(struct drm_device *dev)
3892 {
3893 struct drm_i915_private *dev_priv = dev->dev_private;
3894 unsigned long irqflags;
3895 int pipe;
3896
3897 if (!dev_priv)
3898 return;
3899
3900 I915_WRITE(VLV_MASTER_IER, 0);
3901
3902 intel_hpd_irq_uninstall(dev_priv);
3903
3904 for_each_pipe(pipe)
3905 I915_WRITE(PIPESTAT(pipe), 0xffff);
3906
3907 I915_WRITE(HWSTAM, 0xffffffff);
3908 I915_WRITE(PORT_HOTPLUG_EN, 0);
3909 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3910
3911 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3912 if (dev_priv->display_irqs_enabled)
3913 valleyview_display_irqs_uninstall(dev_priv);
3914 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3915
3916 dev_priv->irq_mask = 0;
3917
3918 I915_WRITE(VLV_IIR, 0xffffffff);
3919 I915_WRITE(VLV_IMR, 0xffffffff);
3920 I915_WRITE(VLV_IER, 0x0);
3921 POSTING_READ(VLV_IER);
3922 }
3923
3924 static void cherryview_irq_uninstall(struct drm_device *dev)
3925 {
3926 struct drm_i915_private *dev_priv = dev->dev_private;
3927 int pipe;
3928
3929 if (!dev_priv)
3930 return;
3931
3932 I915_WRITE(GEN8_MASTER_IRQ, 0);
3933 POSTING_READ(GEN8_MASTER_IRQ);
3934
3935 #define GEN8_IRQ_FINI_NDX(type, which) \
3936 do { \
3937 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
3938 I915_WRITE(GEN8_##type##_IER(which), 0); \
3939 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
3940 POSTING_READ(GEN8_##type##_IIR(which)); \
3941 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
3942 } while (0)
3943
3944 #define GEN8_IRQ_FINI(type) \
3945 do { \
3946 I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
3947 I915_WRITE(GEN8_##type##_IER, 0); \
3948 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
3949 POSTING_READ(GEN8_##type##_IIR); \
3950 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
3951 } while (0)
3952
3953 GEN8_IRQ_FINI_NDX(GT, 0);
3954 GEN8_IRQ_FINI_NDX(GT, 1);
3955 GEN8_IRQ_FINI_NDX(GT, 2);
3956 GEN8_IRQ_FINI_NDX(GT, 3);
3957
3958 GEN8_IRQ_FINI(PCU);
3959
3960 #undef GEN8_IRQ_FINI
3961 #undef GEN8_IRQ_FINI_NDX
3962
3963 I915_WRITE(PORT_HOTPLUG_EN, 0);
3964 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3965
3966 for_each_pipe(pipe)
3967 I915_WRITE(PIPESTAT(pipe), 0xffff);
3968
3969 I915_WRITE(VLV_IMR, 0xffffffff);
3970 I915_WRITE(VLV_IER, 0x0);
3971 I915_WRITE(VLV_IIR, 0xffffffff);
3972 POSTING_READ(VLV_IIR);
3973 }
3974
3975 static void ironlake_irq_uninstall(struct drm_device *dev)
3976 {
3977 struct drm_i915_private *dev_priv = dev->dev_private;
3978
3979 if (!dev_priv)
3980 return;
3981
3982 intel_hpd_irq_uninstall(dev_priv);
3983
3984 ironlake_irq_reset(dev);
3985 }
3986
3987 static void i8xx_irq_preinstall(struct drm_device * dev)
3988 {
3989 struct drm_i915_private *dev_priv = dev->dev_private;
3990 int pipe;
3991
3992 for_each_pipe(pipe)
3993 I915_WRITE(PIPESTAT(pipe), 0);
3994 I915_WRITE16(IMR, 0xffff);
3995 I915_WRITE16(IER, 0x0);
3996 POSTING_READ16(IER);
3997 }
3998
3999 static int i8xx_irq_postinstall(struct drm_device *dev)
4000 {
4001 struct drm_i915_private *dev_priv = dev->dev_private;
4002 unsigned long irqflags;
4003
4004 I915_WRITE16(EMR,
4005 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
4006
4007 /* Unmask the interrupts that we always want on. */
4008 dev_priv->irq_mask =
4009 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4010 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4011 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4012 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4013 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4014 I915_WRITE16(IMR, dev_priv->irq_mask);
4015
4016 I915_WRITE16(IER,
4017 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4018 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4019 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
4020 I915_USER_INTERRUPT);
4021 POSTING_READ16(IER);
4022
4023 /* Interrupt setup is already guaranteed to be single-threaded, this is
4024 * just to make the assert_spin_locked check happy. */
4025 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
4026 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4027 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4028 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
4029
4030 return 0;
4031 }
4032
4033 /*
4034 * Returns true when a page flip has completed.
4035 */
4036 static bool i8xx_handle_vblank(struct drm_device *dev,
4037 int plane, int pipe, u32 iir)
4038 {
4039 struct drm_i915_private *dev_priv = dev->dev_private;
4040 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
4041
4042 if (!intel_pipe_handle_vblank(dev, pipe))
4043 return false;
4044
4045 if ((iir & flip_pending) == 0)
4046 return false;
4047
4048 intel_prepare_page_flip(dev, plane);
4049
4050 /* We detect FlipDone by looking for the change in PendingFlip from '1'
4051 * to '0' on the following vblank, i.e. IIR has the Pendingflip
4052 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
4053 * the flip is completed (no longer pending). Since this doesn't raise
4054 * an interrupt per se, we watch for the change at vblank.
4055 */
4056 if (I915_READ16(ISR) & flip_pending)
4057 return false;
4058
4059 intel_finish_page_flip(dev, pipe);
4060
4061 return true;
4062 }
4063
4064 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
4065 {
4066 struct drm_device *dev = arg;
4067 struct drm_i915_private *dev_priv = dev->dev_private;
4068 u16 iir, new_iir;
4069 u32 pipe_stats[2];
4070 unsigned long irqflags;
4071 int pipe;
4072 u16 flip_mask =
4073 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4074 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4075
4076 iir = I915_READ16(IIR);
4077 if (iir == 0)
4078 return IRQ_NONE;
4079
4080 while (iir & ~flip_mask) {
4081 /* Can't rely on pipestat interrupt bit in iir as it might
4082 * have been cleared after the pipestat interrupt was received.
4083 * It doesn't set the bit in iir again, but it still produces
4084 * interrupts (for non-MSI).
4085 */
4086 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
4087 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4088 i915_handle_error(dev, false,
4089 "Command parser error, iir 0x%08x",
4090 iir);
4091
4092 for_each_pipe(pipe) {
4093 int reg = PIPESTAT(pipe);
4094 pipe_stats[pipe] = I915_READ(reg);
4095
4096 /*
4097 * Clear the PIPE*STAT regs before the IIR
4098 */
4099 if (pipe_stats[pipe] & 0x8000ffff)
4100 I915_WRITE(reg, pipe_stats[pipe]);
4101 }
4102 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
4103
4104 I915_WRITE16(IIR, iir & ~flip_mask);
4105 new_iir = I915_READ16(IIR); /* Flush posted writes */
4106
4107 i915_update_dri1_breadcrumb(dev);
4108
4109 if (iir & I915_USER_INTERRUPT)
4110 notify_ring(dev, &dev_priv->ring[RCS]);
4111
4112 for_each_pipe(pipe) {
4113 int plane = pipe;
4114 if (HAS_FBC(dev))
4115 plane = !plane;
4116
4117 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4118 i8xx_handle_vblank(dev, plane, pipe, iir))
4119 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4120
4121 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4122 i9xx_pipe_crc_irq_handler(dev, pipe);
4123
4124 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
4125 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
4126 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
4127 }
4128
4129 iir = new_iir;
4130 }
4131
4132 return IRQ_HANDLED;
4133 }
4134
4135 static void i8xx_irq_uninstall(struct drm_device * dev)
4136 {
4137 struct drm_i915_private *dev_priv = dev->dev_private;
4138 int pipe;
4139
4140 for_each_pipe(pipe) {
4141 /* Clear enable bits; then clear status bits */
4142 I915_WRITE(PIPESTAT(pipe), 0);
4143 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4144 }
4145 I915_WRITE16(IMR, 0xffff);
4146 I915_WRITE16(IER, 0x0);
4147 I915_WRITE16(IIR, I915_READ16(IIR));
4148 }
4149
4150 static void i915_irq_preinstall(struct drm_device * dev)
4151 {
4152 struct drm_i915_private *dev_priv = dev->dev_private;
4153 int pipe;
4154
4155 if (I915_HAS_HOTPLUG(dev)) {
4156 I915_WRITE(PORT_HOTPLUG_EN, 0);
4157 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4158 }
4159
4160 I915_WRITE16(HWSTAM, 0xeffe);
4161 for_each_pipe(pipe)
4162 I915_WRITE(PIPESTAT(pipe), 0);
4163 I915_WRITE(IMR, 0xffffffff);
4164 I915_WRITE(IER, 0x0);
4165 POSTING_READ(IER);
4166 }
4167
4168 static int i915_irq_postinstall(struct drm_device *dev)
4169 {
4170 struct drm_i915_private *dev_priv = dev->dev_private;
4171 u32 enable_mask;
4172 unsigned long irqflags;
4173
4174 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
4175
4176 /* Unmask the interrupts that we always want on. */
4177 dev_priv->irq_mask =
4178 ~(I915_ASLE_INTERRUPT |
4179 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4180 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4181 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4182 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4183 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4184
4185 enable_mask =
4186 I915_ASLE_INTERRUPT |
4187 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4188 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4189 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
4190 I915_USER_INTERRUPT;
4191
4192 if (I915_HAS_HOTPLUG(dev)) {
4193 I915_WRITE(PORT_HOTPLUG_EN, 0);
4194 POSTING_READ(PORT_HOTPLUG_EN);
4195
4196 /* Enable in IER... */
4197 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
4198 /* and unmask in IMR */
4199 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
4200 }
4201
4202 I915_WRITE(IMR, dev_priv->irq_mask);
4203 I915_WRITE(IER, enable_mask);
4204 POSTING_READ(IER);
4205
4206 i915_enable_asle_pipestat(dev);
4207
4208 /* Interrupt setup is already guaranteed to be single-threaded, this is
4209 * just to make the assert_spin_locked check happy. */
4210 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
4211 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4212 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4213 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
4214
4215 return 0;
4216 }
4217
4218 /*
4219 * Returns true when a page flip has completed.
4220 */
4221 static bool i915_handle_vblank(struct drm_device *dev,
4222 int plane, int pipe, u32 iir)
4223 {
4224 struct drm_i915_private *dev_priv = dev->dev_private;
4225 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
4226
4227 if (!intel_pipe_handle_vblank(dev, pipe))
4228 return false;
4229
4230 if ((iir & flip_pending) == 0)
4231 return false;
4232
4233 intel_prepare_page_flip(dev, plane);
4234
4235 /* We detect FlipDone by looking for the change in PendingFlip from '1'
4236 * to '0' on the following vblank, i.e. IIR has the Pendingflip
4237 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
4238 * the flip is completed (no longer pending). Since this doesn't raise
4239 * an interrupt per se, we watch for the change at vblank.
4240 */
4241 if (I915_READ(ISR) & flip_pending)
4242 return false;
4243
4244 intel_finish_page_flip(dev, pipe);
4245
4246 return true;
4247 }
4248
4249 static irqreturn_t i915_irq_handler(int irq, void *arg)
4250 {
4251 struct drm_device *dev = arg;
4252 struct drm_i915_private *dev_priv = dev->dev_private;
4253 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
4254 unsigned long irqflags;
4255 u32 flip_mask =
4256 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4257 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4258 int pipe, ret = IRQ_NONE;
4259
4260 iir = I915_READ(IIR);
4261 do {
4262 bool irq_received = (iir & ~flip_mask) != 0;
4263 bool blc_event = false;
4264
4265 /* Can't rely on pipestat interrupt bit in iir as it might
4266 * have been cleared after the pipestat interrupt was received.
4267 * It doesn't set the bit in iir again, but it still produces
4268 * interrupts (for non-MSI).
4269 */
4270 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
4271 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4272 i915_handle_error(dev, false,
4273 "Command parser error, iir 0x%08x",
4274 iir);
4275
4276 for_each_pipe(pipe) {
4277 int reg = PIPESTAT(pipe);
4278 pipe_stats[pipe] = I915_READ(reg);
4279
4280 /* Clear the PIPE*STAT regs before the IIR */
4281 if (pipe_stats[pipe] & 0x8000ffff) {
4282 I915_WRITE(reg, pipe_stats[pipe]);
4283 irq_received = true;
4284 }
4285 }
4286 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
4287
4288 if (!irq_received)
4289 break;
4290
4291 /* Consume port. Then clear IIR or we'll miss events */
4292 if (I915_HAS_HOTPLUG(dev) &&
4293 iir & I915_DISPLAY_PORT_INTERRUPT)
4294 i9xx_hpd_irq_handler(dev);
4295
4296 I915_WRITE(IIR, iir & ~flip_mask);
4297 new_iir = I915_READ(IIR); /* Flush posted writes */
4298
4299 if (iir & I915_USER_INTERRUPT)
4300 notify_ring(dev, &dev_priv->ring[RCS]);
4301
4302 for_each_pipe(pipe) {
4303 int plane = pipe;
4304 if (HAS_FBC(dev))
4305 plane = !plane;
4306
4307 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4308 i915_handle_vblank(dev, plane, pipe, iir))
4309 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4310
4311 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4312 blc_event = true;
4313
4314 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4315 i9xx_pipe_crc_irq_handler(dev, pipe);
4316
4317 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
4318 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
4319 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
4320 }
4321
4322 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4323 intel_opregion_asle_intr(dev);
4324
4325 /* With MSI, interrupts are only generated when iir
4326 * transitions from zero to nonzero. If another bit got
4327 * set while we were handling the existing iir bits, then
4328 * we would never get another interrupt.
4329 *
4330 * This is fine on non-MSI as well, as if we hit this path
4331 * we avoid exiting the interrupt handler only to generate
4332 * another one.
4333 *
4334 * Note that for MSI this could cause a stray interrupt report
4335 * if an interrupt landed in the time between writing IIR and
4336 * the posting read. This should be rare enough to never
4337 * trigger the 99% of 100,000 interrupts test for disabling
4338 * stray interrupts.
4339 */
4340 ret = IRQ_HANDLED;
4341 iir = new_iir;
4342 } while (iir & ~flip_mask);
4343
4344 i915_update_dri1_breadcrumb(dev);
4345
4346 return ret;
4347 }
4348
4349 static void i915_irq_uninstall(struct drm_device * dev)
4350 {
4351 struct drm_i915_private *dev_priv = dev->dev_private;
4352 int pipe;
4353
4354 intel_hpd_irq_uninstall(dev_priv);
4355
4356 if (I915_HAS_HOTPLUG(dev)) {
4357 I915_WRITE(PORT_HOTPLUG_EN, 0);
4358 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4359 }
4360
4361 I915_WRITE16(HWSTAM, 0xffff);
4362 for_each_pipe(pipe) {
4363 /* Clear enable bits; then clear status bits */
4364 I915_WRITE(PIPESTAT(pipe), 0);
4365 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4366 }
4367 I915_WRITE(IMR, 0xffffffff);
4368 I915_WRITE(IER, 0x0);
4369
4370 I915_WRITE(IIR, I915_READ(IIR));
4371 }
4372
4373 static void i965_irq_preinstall(struct drm_device * dev)
4374 {
4375 struct drm_i915_private *dev_priv = dev->dev_private;
4376 int pipe;
4377
4378 I915_WRITE(PORT_HOTPLUG_EN, 0);
4379 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4380
4381 I915_WRITE(HWSTAM, 0xeffe);
4382 for_each_pipe(pipe)
4383 I915_WRITE(PIPESTAT(pipe), 0);
4384 I915_WRITE(IMR, 0xffffffff);
4385 I915_WRITE(IER, 0x0);
4386 POSTING_READ(IER);
4387 }
4388
4389 static int i965_irq_postinstall(struct drm_device *dev)
4390 {
4391 struct drm_i915_private *dev_priv = dev->dev_private;
4392 u32 enable_mask;
4393 u32 error_mask;
4394 unsigned long irqflags;
4395
4396 /* Unmask the interrupts that we always want on. */
4397 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
4398 I915_DISPLAY_PORT_INTERRUPT |
4399 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4400 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4401 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4402 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4403 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4404
4405 enable_mask = ~dev_priv->irq_mask;
4406 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4407 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
4408 enable_mask |= I915_USER_INTERRUPT;
4409
4410 if (IS_G4X(dev))
4411 enable_mask |= I915_BSD_USER_INTERRUPT;
4412
4413 /* Interrupt setup is already guaranteed to be single-threaded, this is
4414 * just to make the assert_spin_locked check happy. */
4415 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
4416 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4417 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4418 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4419 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
4420
4421 /*
4422 * Enable some error detection, note the instruction error mask
4423 * bit is reserved, so we leave it masked.
4424 */
4425 if (IS_G4X(dev)) {
4426 error_mask = ~(GM45_ERROR_PAGE_TABLE |
4427 GM45_ERROR_MEM_PRIV |
4428 GM45_ERROR_CP_PRIV |
4429 I915_ERROR_MEMORY_REFRESH);
4430 } else {
4431 error_mask = ~(I915_ERROR_PAGE_TABLE |
4432 I915_ERROR_MEMORY_REFRESH);
4433 }
4434 I915_WRITE(EMR, error_mask);
4435
4436 I915_WRITE(IMR, dev_priv->irq_mask);
4437 I915_WRITE(IER, enable_mask);
4438 POSTING_READ(IER);
4439
4440 I915_WRITE(PORT_HOTPLUG_EN, 0);
4441 POSTING_READ(PORT_HOTPLUG_EN);
4442
4443 i915_enable_asle_pipestat(dev);
4444
4445 return 0;
4446 }
4447
4448 static void i915_hpd_irq_setup(struct drm_device *dev)
4449 {
4450 struct drm_i915_private *dev_priv = dev->dev_private;
4451 struct drm_mode_config *mode_config = &dev->mode_config;
4452 struct intel_encoder *intel_encoder;
4453 u32 hotplug_en;
4454
4455 assert_spin_locked(&dev_priv->irq_lock);
4456
4457 if (I915_HAS_HOTPLUG(dev)) {
4458 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
4459 hotplug_en &= ~HOTPLUG_INT_EN_MASK;
4460 /* Note HDMI and DP share hotplug bits */
4461 /* enable bits are the same for all generations */
4462 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
4463 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
4464 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
4465 /* Programming the CRT detection parameters tends
4466 to generate a spurious hotplug event about three
4467 seconds later. So just do it once.
4468 */
4469 if (IS_G4X(dev))
4470 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4471 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
4472 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4473
4474 /* Ignore TV since it's buggy */
4475 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
4476 }
4477 }
4478
4479 static irqreturn_t i965_irq_handler(int irq, void *arg)
4480 {
4481 struct drm_device *dev = arg;
4482 struct drm_i915_private *dev_priv = dev->dev_private;
4483 u32 iir, new_iir;
4484 u32 pipe_stats[I915_MAX_PIPES];
4485 unsigned long irqflags;
4486 int ret = IRQ_NONE, pipe;
4487 u32 flip_mask =
4488 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4489 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4490
4491 iir = I915_READ(IIR);
4492
4493 for (;;) {
4494 bool irq_received = (iir & ~flip_mask) != 0;
4495 bool blc_event = false;
4496
4497 /* Can't rely on pipestat interrupt bit in iir as it might
4498 * have been cleared after the pipestat interrupt was received.
4499 * It doesn't set the bit in iir again, but it still produces
4500 * interrupts (for non-MSI).
4501 */
4502 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
4503 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4504 i915_handle_error(dev, false,
4505 "Command parser error, iir 0x%08x",
4506 iir);
4507
4508 for_each_pipe(pipe) {
4509 int reg = PIPESTAT(pipe);
4510 pipe_stats[pipe] = I915_READ(reg);
4511
4512 /*
4513 * Clear the PIPE*STAT regs before the IIR
4514 */
4515 if (pipe_stats[pipe] & 0x8000ffff) {
4516 I915_WRITE(reg, pipe_stats[pipe]);
4517 irq_received = true;
4518 }
4519 }
4520 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
4521
4522 if (!irq_received)
4523 break;
4524
4525 ret = IRQ_HANDLED;
4526
4527 /* Consume port. Then clear IIR or we'll miss events */
4528 if (iir & I915_DISPLAY_PORT_INTERRUPT)
4529 i9xx_hpd_irq_handler(dev);
4530
4531 I915_WRITE(IIR, iir & ~flip_mask);
4532 new_iir = I915_READ(IIR); /* Flush posted writes */
4533
4534 if (iir & I915_USER_INTERRUPT)
4535 notify_ring(dev, &dev_priv->ring[RCS]);
4536 if (iir & I915_BSD_USER_INTERRUPT)
4537 notify_ring(dev, &dev_priv->ring[VCS]);
4538
4539 for_each_pipe(pipe) {
4540 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
4541 i915_handle_vblank(dev, pipe, pipe, iir))
4542 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
4543
4544 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4545 blc_event = true;
4546
4547 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4548 i9xx_pipe_crc_irq_handler(dev, pipe);
4549
4550 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
4551 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
4552 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
4553 }
4554
4555 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4556 intel_opregion_asle_intr(dev);
4557
4558 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
4559 gmbus_irq_handler(dev);
4560
4561 /* With MSI, interrupts are only generated when iir
4562 * transitions from zero to nonzero. If another bit got
4563 * set while we were handling the existing iir bits, then
4564 * we would never get another interrupt.
4565 *
4566 * This is fine on non-MSI as well, as if we hit this path
4567 * we avoid exiting the interrupt handler only to generate
4568 * another one.
4569 *
4570 * Note that for MSI this could cause a stray interrupt report
4571 * if an interrupt landed in the time between writing IIR and
4572 * the posting read. This should be rare enough to never
4573 * trigger the 99% of 100,000 interrupts test for disabling
4574 * stray interrupts.
4575 */
4576 iir = new_iir;
4577 }
4578
4579 i915_update_dri1_breadcrumb(dev);
4580
4581 return ret;
4582 }
4583
4584 static void i965_irq_uninstall(struct drm_device * dev)
4585 {
4586 struct drm_i915_private *dev_priv = dev->dev_private;
4587 int pipe;
4588
4589 if (!dev_priv)
4590 return;
4591
4592 intel_hpd_irq_uninstall(dev_priv);
4593
4594 I915_WRITE(PORT_HOTPLUG_EN, 0);
4595 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4596
4597 I915_WRITE(HWSTAM, 0xffffffff);
4598 for_each_pipe(pipe)
4599 I915_WRITE(PIPESTAT(pipe), 0);
4600 I915_WRITE(IMR, 0xffffffff);
4601 I915_WRITE(IER, 0x0);
4602
4603 for_each_pipe(pipe)
4604 I915_WRITE(PIPESTAT(pipe),
4605 I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
4606 I915_WRITE(IIR, I915_READ(IIR));
4607 }
4608
4609 static void intel_hpd_irq_reenable(unsigned long data)
4610 {
4611 struct drm_i915_private *dev_priv = (struct drm_i915_private *)data;
4612 struct drm_device *dev = dev_priv->dev;
4613 struct drm_mode_config *mode_config = &dev->mode_config;
4614 unsigned long irqflags;
4615 int i;
4616
4617 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
4618 for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
4619 struct drm_connector *connector;
4620
4621 if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
4622 continue;
4623
4624 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
4625
4626 list_for_each_entry(connector, &mode_config->connector_list, head) {
4627 struct intel_connector *intel_connector = to_intel_connector(connector);
4628
4629 if (intel_connector->encoder->hpd_pin == i) {
4630 if (connector->polled != intel_connector->polled)
4631 DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
4632 connector->name);
4633 connector->polled = intel_connector->polled;
4634 if (!connector->polled)
4635 connector->polled = DRM_CONNECTOR_POLL_HPD;
4636 }
4637 }
4638 }
4639 if (dev_priv->display.hpd_irq_setup)
4640 dev_priv->display.hpd_irq_setup(dev);
4641 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
4642 }
4643
4644 void intel_irq_init(struct drm_device *dev)
4645 {
4646 struct drm_i915_private *dev_priv = dev->dev_private;
4647
4648 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
4649 INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
4650 INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
4651 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
4652 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4653
4654 /* Let's track the enabled rps events */
4655 if (IS_VALLEYVIEW(dev))
4656 /* WaGsvRC0ResidenncyMethod:VLV */
4657 dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
4658 else
4659 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4660
4661 setup_timer(&dev_priv->gpu_error.hangcheck_timer,
4662 i915_hangcheck_elapsed,
4663 (unsigned long) dev);
4664 setup_timer(&dev_priv->hotplug_reenable_timer, intel_hpd_irq_reenable,
4665 (unsigned long) dev_priv);
4666
4667 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
4668
4669 /* Haven't installed the IRQ handler yet */
4670 dev_priv->pm._irqs_disabled = true;
4671
4672 if (IS_GEN2(dev)) {
4673 dev->max_vblank_count = 0;
4674 dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
4675 } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
4676 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4677 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
4678 } else {
4679 dev->driver->get_vblank_counter = i915_get_vblank_counter;
4680 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
4681 }
4682
4683 /*
4684 * Opt out of the vblank disable timer on everything except gen2.
4685 * Gen2 doesn't have a hardware frame counter and so depends on
4686 * vblank interrupts to produce sane vblank seuquence numbers.
4687 */
4688 if (!IS_GEN2(dev))
4689 dev->vblank_disable_immediate = true;
4690
4691 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
4692 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
4693 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4694 }
4695
4696 if (IS_CHERRYVIEW(dev)) {
4697 dev->driver->irq_handler = cherryview_irq_handler;
4698 dev->driver->irq_preinstall = cherryview_irq_preinstall;
4699 dev->driver->irq_postinstall = cherryview_irq_postinstall;
4700 dev->driver->irq_uninstall = cherryview_irq_uninstall;
4701 dev->driver->enable_vblank = valleyview_enable_vblank;
4702 dev->driver->disable_vblank = valleyview_disable_vblank;
4703 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4704 } else if (IS_VALLEYVIEW(dev)) {
4705 dev->driver->irq_handler = valleyview_irq_handler;
4706 dev->driver->irq_preinstall = valleyview_irq_preinstall;
4707 dev->driver->irq_postinstall = valleyview_irq_postinstall;
4708 dev->driver->irq_uninstall = valleyview_irq_uninstall;
4709 dev->driver->enable_vblank = valleyview_enable_vblank;
4710 dev->driver->disable_vblank = valleyview_disable_vblank;
4711 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4712 } else if (IS_GEN8(dev)) {
4713 dev->driver->irq_handler = gen8_irq_handler;
4714 dev->driver->irq_preinstall = gen8_irq_reset;
4715 dev->driver->irq_postinstall = gen8_irq_postinstall;
4716 dev->driver->irq_uninstall = gen8_irq_uninstall;
4717 dev->driver->enable_vblank = gen8_enable_vblank;
4718 dev->driver->disable_vblank = gen8_disable_vblank;
4719 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
4720 } else if (HAS_PCH_SPLIT(dev)) {
4721 dev->driver->irq_handler = ironlake_irq_handler;
4722 dev->driver->irq_preinstall = ironlake_irq_reset;
4723 dev->driver->irq_postinstall = ironlake_irq_postinstall;
4724 dev->driver->irq_uninstall = ironlake_irq_uninstall;
4725 dev->driver->enable_vblank = ironlake_enable_vblank;
4726 dev->driver->disable_vblank = ironlake_disable_vblank;
4727 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
4728 } else {
4729 if (INTEL_INFO(dev)->gen == 2) {
4730 dev->driver->irq_preinstall = i8xx_irq_preinstall;
4731 dev->driver->irq_postinstall = i8xx_irq_postinstall;
4732 dev->driver->irq_handler = i8xx_irq_handler;
4733 dev->driver->irq_uninstall = i8xx_irq_uninstall;
4734 } else if (INTEL_INFO(dev)->gen == 3) {
4735 dev->driver->irq_preinstall = i915_irq_preinstall;
4736 dev->driver->irq_postinstall = i915_irq_postinstall;
4737 dev->driver->irq_uninstall = i915_irq_uninstall;
4738 dev->driver->irq_handler = i915_irq_handler;
4739 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4740 } else {
4741 dev->driver->irq_preinstall = i965_irq_preinstall;
4742 dev->driver->irq_postinstall = i965_irq_postinstall;
4743 dev->driver->irq_uninstall = i965_irq_uninstall;
4744 dev->driver->irq_handler = i965_irq_handler;
4745 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4746 }
4747 dev->driver->enable_vblank = i915_enable_vblank;
4748 dev->driver->disable_vblank = i915_disable_vblank;
4749 }
4750 }
4751
4752 void intel_hpd_init(struct drm_device *dev)
4753 {
4754 struct drm_i915_private *dev_priv = dev->dev_private;
4755 struct drm_mode_config *mode_config = &dev->mode_config;
4756 struct drm_connector *connector;
4757 unsigned long irqflags;
4758 int i;
4759
4760 for (i = 1; i < HPD_NUM_PINS; i++) {
4761 dev_priv->hpd_stats[i].hpd_cnt = 0;
4762 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
4763 }
4764 list_for_each_entry(connector, &mode_config->connector_list, head) {
4765 struct intel_connector *intel_connector = to_intel_connector(connector);
4766 connector->polled = intel_connector->polled;
4767 if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
4768 connector->polled = DRM_CONNECTOR_POLL_HPD;
4769 if (intel_connector->mst_port)
4770 connector->polled = DRM_CONNECTOR_POLL_HPD;
4771 }
4772
4773 /* Interrupt setup is already guaranteed to be single-threaded, this is
4774 * just to make the assert_spin_locked checks happy. */
4775 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
4776 if (dev_priv->display.hpd_irq_setup)
4777 dev_priv->display.hpd_irq_setup(dev);
4778 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
4779 }
4780
4781 /* Disable interrupts so we can allow runtime PM. */
4782 void intel_runtime_pm_disable_interrupts(struct drm_device *dev)
4783 {
4784 struct drm_i915_private *dev_priv = dev->dev_private;
4785
4786 dev->driver->irq_uninstall(dev);
4787 dev_priv->pm._irqs_disabled = true;
4788 }
4789
4790 /* Restore interrupts so we can recover from runtime PM. */
4791 void intel_runtime_pm_restore_interrupts(struct drm_device *dev)
4792 {
4793 struct drm_i915_private *dev_priv = dev->dev_private;
4794
4795 dev_priv->pm._irqs_disabled = false;
4796 dev->driver->irq_preinstall(dev);
4797 dev->driver->irq_postinstall(dev);
4798 }
This page took 0.206647 seconds and 6 git commands to generate.