1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2 */
3 /*
4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
17 * of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 */
28
29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
31 #include <linux/sysrq.h>
32 #include <linux/slab.h>
33 #include <linux/circ_buf.h>
34 #include <drm/drmP.h>
35 #include <drm/i915_drm.h>
36 #include "i915_drv.h"
37 #include "i915_trace.h"
38 #include "intel_drv.h"
39
40 static const u32 hpd_ibx[] = {
41 [HPD_CRT] = SDE_CRT_HOTPLUG,
42 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
43 [HPD_PORT_B] = SDE_PORTB_HOTPLUG,
44 [HPD_PORT_C] = SDE_PORTC_HOTPLUG,
45 [HPD_PORT_D] = SDE_PORTD_HOTPLUG
46 };
47
48 static const u32 hpd_cpt[] = {
49 [HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
50 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
51 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
52 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
53 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
54 };
55
56 static const u32 hpd_mask_i915[] = {
57 [HPD_CRT] = CRT_HOTPLUG_INT_EN,
58 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
59 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
60 [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
61 [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
62 [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
63 };
64
65 static const u32 hpd_status_g4x[] = {
66 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
67 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
68 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
69 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
70 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
71 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
72 };
73
74 static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
75 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
76 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
77 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
78 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
79 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
80 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
81 };
82
83 /* IIR can theoretically queue up two events. Be paranoid. */
84 #define GEN8_IRQ_RESET_NDX(type, which) do { \
85 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
86 POSTING_READ(GEN8_##type##_IMR(which)); \
87 I915_WRITE(GEN8_##type##_IER(which), 0); \
88 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
89 POSTING_READ(GEN8_##type##_IIR(which)); \
90 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
91 POSTING_READ(GEN8_##type##_IIR(which)); \
92 } while (0)
93
94 #define GEN5_IRQ_RESET(type) do { \
95 I915_WRITE(type##IMR, 0xffffffff); \
96 POSTING_READ(type##IMR); \
97 I915_WRITE(type##IER, 0); \
98 I915_WRITE(type##IIR, 0xffffffff); \
99 POSTING_READ(type##IIR); \
100 I915_WRITE(type##IIR, 0xffffffff); \
101 POSTING_READ(type##IIR); \
102 } while (0)
103
104 /*
105 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
106 */
107 #define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
108 u32 val = I915_READ(reg); \
109 if (val) { \
110 WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
111 (reg), val); \
112 I915_WRITE((reg), 0xffffffff); \
113 POSTING_READ(reg); \
114 I915_WRITE((reg), 0xffffffff); \
115 POSTING_READ(reg); \
116 } \
117 } while (0)
118
119 #define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
120 GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
121 I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
122 I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
123 POSTING_READ(GEN8_##type##_IER(which)); \
124 } while (0)
125
126 #define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
127 GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
128 I915_WRITE(type##IMR, (imr_val)); \
129 I915_WRITE(type##IER, (ier_val)); \
130 POSTING_READ(type##IER); \
131 } while (0)
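/*
 * Reset/init pattern: the RESET macros mask everything (IMR = ~0), zero IER
 * and clear IIR twice (IIR can hold two queued events, see above); the INIT
 * macros then assert that IIR really is zero before programming IMR/IER,
 * matching the preinstall/postinstall split described above.
 */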
132
133 /* For display hotplug interrupt */
134 static void
135 ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
136 {
137 assert_spin_locked(&dev_priv->irq_lock);
138
139 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
140 return;
141
142 if ((dev_priv->irq_mask & mask) != 0) {
143 dev_priv->irq_mask &= ~mask;
144 I915_WRITE(DEIMR, dev_priv->irq_mask);
145 POSTING_READ(DEIMR);
146 }
147 }
148
149 static void
150 ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
151 {
152 assert_spin_locked(&dev_priv->irq_lock);
153
154 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
155 return;
156
157 if ((dev_priv->irq_mask & mask) != mask) {
158 dev_priv->irq_mask |= mask;
159 I915_WRITE(DEIMR, dev_priv->irq_mask);
160 POSTING_READ(DEIMR);
161 }
162 }
163
164 /**
165 * ilk_update_gt_irq - update GTIMR
166 * @dev_priv: driver private
167 * @interrupt_mask: mask of interrupt bits to update
168 * @enabled_irq_mask: mask of interrupt bits to enable
169 */
170 static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
171 uint32_t interrupt_mask,
172 uint32_t enabled_irq_mask)
173 {
174 assert_spin_locked(&dev_priv->irq_lock);
175
176 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
177 return;
178
179 dev_priv->gt_irq_mask &= ~interrupt_mask;
180 dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
181 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
182 POSTING_READ(GTIMR);
183 }
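/*
 * GTIMR is a mask register: a set bit disables that interrupt. The helpers
 * below build on ilk_update_gt_irq(): gen5_enable_gt_irq(dev_priv, bit)
 * clears @bit in GTIMR (unmasks it), while gen5_disable_gt_irq(dev_priv, bit)
 * sets it again (masks it).
 */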
184
185 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
186 {
187 ilk_update_gt_irq(dev_priv, mask, mask);
188 }
189
190 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
191 {
192 ilk_update_gt_irq(dev_priv, mask, 0);
193 }
194
195 /**
196 * snb_update_pm_irq - update GEN6_PMIMR
197 * @dev_priv: driver private
198 * @interrupt_mask: mask of interrupt bits to update
199 * @enabled_irq_mask: mask of interrupt bits to enable
200 */
201 static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
202 uint32_t interrupt_mask,
203 uint32_t enabled_irq_mask)
204 {
205 uint32_t new_val;
206
207 assert_spin_locked(&dev_priv->irq_lock);
208
209 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
210 return;
211
212 new_val = dev_priv->pm_irq_mask;
213 new_val &= ~interrupt_mask;
214 new_val |= (~enabled_irq_mask & interrupt_mask);
215
216 if (new_val != dev_priv->pm_irq_mask) {
217 dev_priv->pm_irq_mask = new_val;
218 I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
219 POSTING_READ(GEN6_PMIMR);
220 }
221 }
222
223 void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
224 {
225 snb_update_pm_irq(dev_priv, mask, mask);
226 }
227
228 void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
229 {
230 snb_update_pm_irq(dev_priv, mask, 0);
231 }
232
233 static bool ivb_can_enable_err_int(struct drm_device *dev)
234 {
235 struct drm_i915_private *dev_priv = dev->dev_private;
236 struct intel_crtc *crtc;
237 enum pipe pipe;
238
239 assert_spin_locked(&dev_priv->irq_lock);
240
241 for_each_pipe(dev_priv, pipe) {
242 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
243
244 if (crtc->cpu_fifo_underrun_disabled)
245 return false;
246 }
247
248 return true;
249 }
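/*
 * IVB/HSW have a single DE_ERR_INT_IVB enable bit shared by all pipes, so
 * the error interrupt may only be (re)enabled once no pipe has CPU FIFO
 * underrun reporting disabled; see intel_set_cpu_fifo_underrun_reporting().
 */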
250
251 /**
252 * bdw_update_pm_irq - update GT interrupt 2
253 * @dev_priv: driver private
254 * @interrupt_mask: mask of interrupt bits to update
255 * @enabled_irq_mask: mask of interrupt bits to enable
256 *
257 * Copied from the snb function, updated with relevant register offsets
258 */
259 static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
260 uint32_t interrupt_mask,
261 uint32_t enabled_irq_mask)
262 {
263 uint32_t new_val;
264
265 assert_spin_locked(&dev_priv->irq_lock);
266
267 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
268 return;
269
270 new_val = dev_priv->pm_irq_mask;
271 new_val &= ~interrupt_mask;
272 new_val |= (~enabled_irq_mask & interrupt_mask);
273
274 if (new_val != dev_priv->pm_irq_mask) {
275 dev_priv->pm_irq_mask = new_val;
276 I915_WRITE(GEN8_GT_IMR(2), dev_priv->pm_irq_mask);
277 POSTING_READ(GEN8_GT_IMR(2));
278 }
279 }
280
281 void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
282 {
283 bdw_update_pm_irq(dev_priv, mask, mask);
284 }
285
286 void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
287 {
288 bdw_update_pm_irq(dev_priv, mask, 0);
289 }
290
291 static bool cpt_can_enable_serr_int(struct drm_device *dev)
292 {
293 struct drm_i915_private *dev_priv = dev->dev_private;
294 enum pipe pipe;
295 struct intel_crtc *crtc;
296
297 assert_spin_locked(&dev_priv->irq_lock);
298
299 for_each_pipe(dev_priv, pipe) {
300 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
301
302 if (crtc->pch_fifo_underrun_disabled)
303 return false;
304 }
305
306 return true;
307 }
308
309 void i9xx_check_fifo_underruns(struct drm_device *dev)
310 {
311 struct drm_i915_private *dev_priv = dev->dev_private;
312 struct intel_crtc *crtc;
313
314 spin_lock_irq(&dev_priv->irq_lock);
315
316 for_each_intel_crtc(dev, crtc) {
317 u32 reg = PIPESTAT(crtc->pipe);
318 u32 pipestat;
319
320 if (crtc->cpu_fifo_underrun_disabled)
321 continue;
322
323 pipestat = I915_READ(reg) & 0xffff0000;
324 if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
325 continue;
326
327 I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
328 POSTING_READ(reg);
329
330 DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
331 }
332
333 spin_unlock_irq(&dev_priv->irq_lock);
334 }
335
336 static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
337 enum pipe pipe,
338 bool enable, bool old)
339 {
340 struct drm_i915_private *dev_priv = dev->dev_private;
341 u32 reg = PIPESTAT(pipe);
342 u32 pipestat = I915_READ(reg) & 0xffff0000;
343
344 assert_spin_locked(&dev_priv->irq_lock);
345
346 if (enable) {
347 I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
348 POSTING_READ(reg);
349 } else {
350 if (old && pipestat & PIPE_FIFO_UNDERRUN_STATUS)
351 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
352 }
353 }
354
355 static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
356 enum pipe pipe, bool enable)
357 {
358 struct drm_i915_private *dev_priv = dev->dev_private;
359 uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
360 DE_PIPEB_FIFO_UNDERRUN;
361
362 if (enable)
363 ironlake_enable_display_irq(dev_priv, bit);
364 else
365 ironlake_disable_display_irq(dev_priv, bit);
366 }
367
368 static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
369 enum pipe pipe,
370 bool enable, bool old)
371 {
372 struct drm_i915_private *dev_priv = dev->dev_private;
373 if (enable) {
374 I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
375
376 if (!ivb_can_enable_err_int(dev))
377 return;
378
379 ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
380 } else {
381 ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
382
383 if (old &&
384 I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
385 DRM_ERROR("uncleared fifo underrun on pipe %c\n",
386 pipe_name(pipe));
387 }
388 }
389 }
390
391 static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
392 enum pipe pipe, bool enable)
393 {
394 struct drm_i915_private *dev_priv = dev->dev_private;
395
396 assert_spin_locked(&dev_priv->irq_lock);
397
398 if (enable)
399 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
400 else
401 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
402 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
403 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
404 }
405
406 /**
407 * ibx_display_interrupt_update - update SDEIMR
408 * @dev_priv: driver private
409 * @interrupt_mask: mask of interrupt bits to update
410 * @enabled_irq_mask: mask of interrupt bits to enable
411 */
412 static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
413 uint32_t interrupt_mask,
414 uint32_t enabled_irq_mask)
415 {
416 uint32_t sdeimr = I915_READ(SDEIMR);
417 sdeimr &= ~interrupt_mask;
418 sdeimr |= (~enabled_irq_mask & interrupt_mask);
419
420 assert_spin_locked(&dev_priv->irq_lock);
421
422 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
423 return;
424
425 I915_WRITE(SDEIMR, sdeimr);
426 POSTING_READ(SDEIMR);
427 }
428 #define ibx_enable_display_interrupt(dev_priv, bits) \
429 ibx_display_interrupt_update((dev_priv), (bits), (bits))
430 #define ibx_disable_display_interrupt(dev_priv, bits) \
431 ibx_display_interrupt_update((dev_priv), (bits), 0)
432
433 static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
434 enum transcoder pch_transcoder,
435 bool enable)
436 {
437 struct drm_i915_private *dev_priv = dev->dev_private;
438 uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
439 SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
440
441 if (enable)
442 ibx_enable_display_interrupt(dev_priv, bit);
443 else
444 ibx_disable_display_interrupt(dev_priv, bit);
445 }
446
447 static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
448 enum transcoder pch_transcoder,
449 bool enable, bool old)
450 {
451 struct drm_i915_private *dev_priv = dev->dev_private;
452
453 if (enable) {
454 I915_WRITE(SERR_INT,
455 SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
456
457 if (!cpt_can_enable_serr_int(dev))
458 return;
459
460 ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
461 } else {
462 ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
463
464 if (old && I915_READ(SERR_INT) &
465 SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
466 DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
467 transcoder_name(pch_transcoder));
468 }
469 }
470 }
471
472 /**
473 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
474 * @dev: drm device
475 * @pipe: pipe
476 * @enable: true if we want to report FIFO underrun errors, false otherwise
477 *
478 * This function makes us disable or enable CPU fifo underruns for a specific
479 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
480 * reporting for one pipe may also disable all the other CPU error interrupts for
481 * the other pipes, due to the fact that there's just one interrupt mask/enable
482 * bit for all the pipes.
483 *
484 * Returns the previous state of underrun reporting.
485 */
486 static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
487 enum pipe pipe, bool enable)
488 {
489 struct drm_i915_private *dev_priv = dev->dev_private;
490 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
491 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
492 bool old;
493
494 assert_spin_locked(&dev_priv->irq_lock);
495
496 old = !intel_crtc->cpu_fifo_underrun_disabled;
497 intel_crtc->cpu_fifo_underrun_disabled = !enable;
498
499 if (HAS_GMCH_DISPLAY(dev))
500 i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
501 else if (IS_GEN5(dev) || IS_GEN6(dev))
502 ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
503 else if (IS_GEN7(dev))
504 ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
505 else if (IS_GEN8(dev) || IS_GEN9(dev))
506 broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
507
508 return old;
509 }
510
511 bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
512 enum pipe pipe, bool enable)
513 {
514 struct drm_i915_private *dev_priv = dev->dev_private;
515 unsigned long flags;
516 bool ret;
517
518 spin_lock_irqsave(&dev_priv->irq_lock, flags);
519 ret = __intel_set_cpu_fifo_underrun_reporting(dev, pipe, enable);
520 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
521
522 return ret;
523 }
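/*
 * Typical usage (sketch, not an exact call site): modeset code suppresses
 * reporting around operations where spurious underruns are expected and
 * restores the previous state afterwards, e.g.:
 *
 *	old = intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
 *	... reconfigure the pipe ...
 *	intel_set_cpu_fifo_underrun_reporting(dev, pipe, old);
 */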
524
525 static bool __cpu_fifo_underrun_reporting_enabled(struct drm_device *dev,
526 enum pipe pipe)
527 {
528 struct drm_i915_private *dev_priv = dev->dev_private;
529 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
530 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
531
532 return !intel_crtc->cpu_fifo_underrun_disabled;
533 }
534
535 /**
536 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
537 * @dev: drm device
538 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
539 * @enable: true if we want to report FIFO underrun errors, false otherwise
540 *
541 * This function makes us disable or enable PCH fifo underruns for a specific
542 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
543 * underrun reporting for one transcoder may also disable all the other PCH
544 * error interrupts for the other transcoders, due to the fact that there's just
545 * one interrupt mask/enable bit for all the transcoders.
546 *
547 * Returns the previous state of underrun reporting.
548 */
549 bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
550 enum transcoder pch_transcoder,
551 bool enable)
552 {
553 struct drm_i915_private *dev_priv = dev->dev_private;
554 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
555 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
556 unsigned long flags;
557 bool old;
558
559 /*
560 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
561 * has only one pch transcoder A that all pipes can use. To avoid racy
562 * pch transcoder -> pipe lookups from interrupt code simply store the
563 * underrun statistics in crtc A. Since we never expose this anywhere
564 * nor use it outside of the fifo underrun code here using the "wrong"
565 * crtc on LPT won't cause issues.
566 */
567
568 spin_lock_irqsave(&dev_priv->irq_lock, flags);
569
570 old = !intel_crtc->pch_fifo_underrun_disabled;
571 intel_crtc->pch_fifo_underrun_disabled = !enable;
572
573 if (HAS_PCH_IBX(dev))
574 ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
575 else
576 cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable, old);
577
578 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
579 return old;
580 }
581
582
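/*
 * PIPESTAT layout: the low 16 bits are status bits (write 1 to clear), the
 * high 16 bits are the matching enable bits, which is why enable_mask is
 * normally just status_mask << 16 (with the VLV sprite/PSR exceptions
 * handled in vlv_get_pipestat_enable_mask()).
 */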
583 static void
584 __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
585 u32 enable_mask, u32 status_mask)
586 {
587 u32 reg = PIPESTAT(pipe);
588 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
589
590 assert_spin_locked(&dev_priv->irq_lock);
591 WARN_ON(!intel_irqs_enabled(dev_priv));
592
593 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
594 status_mask & ~PIPESTAT_INT_STATUS_MASK,
595 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
596 pipe_name(pipe), enable_mask, status_mask))
597 return;
598
599 if ((pipestat & enable_mask) == enable_mask)
600 return;
601
602 dev_priv->pipestat_irq_mask[pipe] |= status_mask;
603
604 /* Enable the interrupt, clear any pending status */
605 pipestat |= enable_mask | status_mask;
606 I915_WRITE(reg, pipestat);
607 POSTING_READ(reg);
608 }
609
610 static void
611 __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
612 u32 enable_mask, u32 status_mask)
613 {
614 u32 reg = PIPESTAT(pipe);
615 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
616
617 assert_spin_locked(&dev_priv->irq_lock);
618 WARN_ON(!intel_irqs_enabled(dev_priv));
619
620 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
621 status_mask & ~PIPESTAT_INT_STATUS_MASK,
622 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
623 pipe_name(pipe), enable_mask, status_mask))
624 return;
625
626 if ((pipestat & enable_mask) == 0)
627 return;
628
629 dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
630
631 pipestat &= ~enable_mask;
632 I915_WRITE(reg, pipestat);
633 POSTING_READ(reg);
634 }
635
636 static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
637 {
638 u32 enable_mask = status_mask << 16;
639
640 /*
641 * On pipe A we don't support the PSR interrupt yet,
642 * on pipe B and C the same bit MBZ.
643 */
644 if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
645 return 0;
646 /*
647 * On pipe B and C we don't support the PSR interrupt yet, on pipe
648 * A the same bit is for perf counters which we don't use either.
649 */
650 if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
651 return 0;
652
653 enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
654 SPRITE0_FLIP_DONE_INT_EN_VLV |
655 SPRITE1_FLIP_DONE_INT_EN_VLV);
656 if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
657 enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
658 if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
659 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
660
661 return enable_mask;
662 }
663
664 void
665 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
666 u32 status_mask)
667 {
668 u32 enable_mask;
669
670 if (IS_VALLEYVIEW(dev_priv->dev))
671 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
672 status_mask);
673 else
674 enable_mask = status_mask << 16;
675 __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
676 }
677
678 void
679 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
680 u32 status_mask)
681 {
682 u32 enable_mask;
683
684 if (IS_VALLEYVIEW(dev_priv->dev))
685 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
686 status_mask);
687 else
688 enable_mask = status_mask << 16;
689 __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
690 }
691
692 /**
693 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
694 */
695 static void i915_enable_asle_pipestat(struct drm_device *dev)
696 {
697 struct drm_i915_private *dev_priv = dev->dev_private;
698
699 if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
700 return;
701
702 spin_lock_irq(&dev_priv->irq_lock);
703
704 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
705 if (INTEL_INFO(dev)->gen >= 4)
706 i915_enable_pipestat(dev_priv, PIPE_A,
707 PIPE_LEGACY_BLC_EVENT_STATUS);
708
709 spin_unlock_irq(&dev_priv->irq_lock);
710 }
711
712 /**
713 * i915_pipe_enabled - check if a pipe is enabled
714 * @dev: DRM device
715 * @pipe: pipe to check
716 *
717 * Reading certain registers when the pipe is disabled can hang the chip.
718 * Use this routine to make sure the PLL is running and the pipe is active
719 * before reading such registers if unsure.
720 */
721 static int
722 i915_pipe_enabled(struct drm_device *dev, int pipe)
723 {
724 struct drm_i915_private *dev_priv = dev->dev_private;
725
726 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
727 /* Locking is horribly broken here, but whatever. */
728 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
729 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
730
731 return intel_crtc->active;
732 } else {
733 return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
734 }
735 }
736
737 /*
738 * This timing diagram depicts the video signal in and
739 * around the vertical blanking period.
740 *
741 * Assumptions about the fictitious mode used in this example:
742 * vblank_start >= 3
743 * vsync_start = vblank_start + 1
744 * vsync_end = vblank_start + 2
745 * vtotal = vblank_start + 3
746 *
747 * start of vblank:
748 * latch double buffered registers
749 * increment frame counter (ctg+)
750 * generate start of vblank interrupt (gen4+)
751 * |
752 * | frame start:
753 * | generate frame start interrupt (aka. vblank interrupt) (gmch)
754 * | may be shifted forward 1-3 extra lines via PIPECONF
755 * | |
756 * | | start of vsync:
757 * | | generate vsync interrupt
758 * | | |
759 * ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx
760 * . \hs/ . \hs/ \hs/ \hs/ . \hs/
761 * ----va---> <-----------------vb--------------------> <--------va-------------
762 * | | <----vs-----> |
763 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
764 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
765 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
766 * | | |
767 * last visible pixel first visible pixel
768 * | increment frame counter (gen3/4)
769 * pixel counter = vblank_start * htotal pixel counter = 0 (gen3/4)
770 *
771 * x = horizontal active
772 * _ = horizontal blanking
773 * hs = horizontal sync
774 * va = vertical active
775 * vb = vertical blanking
776 * vs = vertical sync
777 * vbs = vblank_start (number)
778 *
779 * Summary:
780 * - most events happen at the start of horizontal sync
781 * - frame start happens at the start of horizontal blank, 1-4 lines
782 * (depending on PIPECONF settings) after the start of vblank
783 * - gen3/4 pixel and frame counter are synchronized with the start
784 * of horizontal active on the first line of vertical active
785 */
786
787 static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
788 {
789 /* Gen2 doesn't have a hardware frame counter */
790 return 0;
791 }
792
793 /* Called from drm generic code, passed a 'crtc', which
794 * we use as a pipe index
795 */
796 static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
797 {
798 struct drm_i915_private *dev_priv = dev->dev_private;
799 unsigned long high_frame;
800 unsigned long low_frame;
801 u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
802
803 if (!i915_pipe_enabled(dev, pipe)) {
804 DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
805 "pipe %c\n", pipe_name(pipe));
806 return 0;
807 }
808
809 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
810 struct intel_crtc *intel_crtc =
811 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
812 const struct drm_display_mode *mode =
813 &intel_crtc->config.adjusted_mode;
814
815 htotal = mode->crtc_htotal;
816 hsync_start = mode->crtc_hsync_start;
817 vbl_start = mode->crtc_vblank_start;
818 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
819 vbl_start = DIV_ROUND_UP(vbl_start, 2);
820 } else {
821 enum transcoder cpu_transcoder = (enum transcoder) pipe;
822
823 htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
824 hsync_start = (I915_READ(HSYNC(cpu_transcoder)) & 0x1fff) + 1;
825 vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
826 if ((I915_READ(PIPECONF(cpu_transcoder)) &
827 PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE)
828 vbl_start = DIV_ROUND_UP(vbl_start, 2);
829 }
830
831 /* Convert to pixel count */
832 vbl_start *= htotal;
833
834 /* Start of vblank event occurs at start of hsync */
835 vbl_start -= htotal - hsync_start;
836
837 high_frame = PIPEFRAME(pipe);
838 low_frame = PIPEFRAMEPIXEL(pipe);
839
840 /*
841 * High & low register fields aren't synchronized, so make sure
842 * we get a low value that's stable across two reads of the high
843 * register.
844 */
845 do {
846 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
847 low = I915_READ(low_frame);
848 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
849 } while (high1 != high2);
850
851 high1 >>= PIPE_FRAME_HIGH_SHIFT;
852 pixel = low & PIPE_PIXEL_MASK;
853 low >>= PIPE_FRAME_LOW_SHIFT;
854
855 /*
856 * The frame counter increments at beginning of active.
857 * Cook up a vblank counter by also checking the pixel
858 * counter against vblank start.
859 */
860 return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
861 }
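/*
 * Worked example with illustrative (made-up) timings: htotal = 2200,
 * hsync_start = 2008, vblank_start = 1125 lines. Then vbl_start becomes
 * 1125 * 2200 - (2200 - 2008) = 2474808 pixels, and the returned value gets
 * +1 whenever the pixel counter has already passed that point, i.e. the
 * hardware frame counter (which increments at start of active) is presented
 * as if it incremented at start of vblank.
 */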
862
863 static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
864 {
865 struct drm_i915_private *dev_priv = dev->dev_private;
866 int reg = PIPE_FRMCOUNT_GM45(pipe);
867
868 if (!i915_pipe_enabled(dev, pipe)) {
869 DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
870 "pipe %c\n", pipe_name(pipe));
871 return 0;
872 }
873
874 return I915_READ(reg);
875 }
876
877 /* raw reads, only for fast reads of display block, no need for forcewake etc. */
878 #define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
879
880 static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
881 {
882 struct drm_device *dev = crtc->base.dev;
883 struct drm_i915_private *dev_priv = dev->dev_private;
884 const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
885 enum pipe pipe = crtc->pipe;
886 int position, vtotal;
887
888 vtotal = mode->crtc_vtotal;
889 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
890 vtotal /= 2;
891
892 if (IS_GEN2(dev))
893 position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
894 else
895 position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
896
897 /*
898 * See update_scanline_offset() for the details on the
899 * scanline_offset adjustment.
900 */
901 return (position + crtc->scanline_offset) % vtotal;
902 }
903
904 static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
905 unsigned int flags, int *vpos, int *hpos,
906 ktime_t *stime, ktime_t *etime)
907 {
908 struct drm_i915_private *dev_priv = dev->dev_private;
909 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
910 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
911 const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
912 int position;
913 int vbl_start, vbl_end, hsync_start, htotal, vtotal;
914 bool in_vbl = true;
915 int ret = 0;
916 unsigned long irqflags;
917
918 if (!intel_crtc->active) {
919 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
920 "pipe %c\n", pipe_name(pipe));
921 return 0;
922 }
923
924 htotal = mode->crtc_htotal;
925 hsync_start = mode->crtc_hsync_start;
926 vtotal = mode->crtc_vtotal;
927 vbl_start = mode->crtc_vblank_start;
928 vbl_end = mode->crtc_vblank_end;
929
930 if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
931 vbl_start = DIV_ROUND_UP(vbl_start, 2);
932 vbl_end /= 2;
933 vtotal /= 2;
934 }
935
936 ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
937
938 /*
939 * Lock uncore.lock, as we will do multiple timing critical raw
940 * register reads, potentially with preemption disabled, so the
941 * following code must not block on uncore.lock.
942 */
943 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
944
945 /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
946
947 /* Get optional system timestamp before query. */
948 if (stime)
949 *stime = ktime_get();
950
951 if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
952 /* No obvious pixelcount register. Only query vertical
953 * scanout position from Display scan line register.
954 */
955 position = __intel_get_crtc_scanline(intel_crtc);
956 } else {
957 /* Have access to pixelcount since start of frame.
958 * We can split this into vertical and horizontal
959 * scanout position.
960 */
961 position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
962
963 /* convert to pixel counts */
964 vbl_start *= htotal;
965 vbl_end *= htotal;
966 vtotal *= htotal;
967
968 /*
969 * In interlaced modes, the pixel counter counts all pixels,
970 * so one field will have htotal more pixels. In order to avoid
971 * the reported position from jumping backwards when the pixel
972 * counter is beyond the length of the shorter field, just
973 * clamp the position to the length of the shorter field. This
974 * matches how the scanline counter based position works since
975 * the scanline counter doesn't count the two half lines.
976 */
977 if (position >= vtotal)
978 position = vtotal - 1;
979
980 /*
981 * Start of vblank interrupt is triggered at start of hsync,
982 * just prior to the first active line of vblank. However we
983 * consider lines to start at the leading edge of horizontal
984 * active. So, should we get here before we've crossed into
985 * the horizontal active of the first line in vblank, we would
986 * not set the DRM_SCANOUTPOS_IN_VBLANK flag. In order to fix that,
987 * always add htotal-hsync_start to the current pixel position.
988 */
989 position = (position + htotal - hsync_start) % vtotal;
990 }
991
992 /* Get optional system timestamp after query. */
993 if (etime)
994 *etime = ktime_get();
995
996 /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
997
998 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
999
1000 in_vbl = position >= vbl_start && position < vbl_end;
1001
1002 /*
1003 * While in vblank, position will be negative
1004 * counting up towards 0 at vbl_end. And outside
1005 * vblank, position will be positive counting
1006 * up since vbl_end.
1007 */
1008 if (position >= vbl_start)
1009 position -= vbl_end;
1010 else
1011 position += vtotal - vbl_end;
1012
1013 if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
1014 *vpos = position;
1015 *hpos = 0;
1016 } else {
1017 *vpos = position / htotal;
1018 *hpos = position - (*vpos * htotal);
1019 }
1020
1021 /* In vblank? */
1022 if (in_vbl)
1023 ret |= DRM_SCANOUTPOS_IN_VBLANK;
1024
1025 return ret;
1026 }
1027
1028 int intel_get_crtc_scanline(struct intel_crtc *crtc)
1029 {
1030 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
1031 unsigned long irqflags;
1032 int position;
1033
1034 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
1035 position = __intel_get_crtc_scanline(crtc);
1036 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
1037
1038 return position;
1039 }
1040
1041 static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
1042 int *max_error,
1043 struct timeval *vblank_time,
1044 unsigned flags)
1045 {
1046 struct drm_crtc *crtc;
1047
1048 if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
1049 DRM_ERROR("Invalid crtc %d\n", pipe);
1050 return -EINVAL;
1051 }
1052
1053 /* Get drm_crtc to timestamp: */
1054 crtc = intel_get_crtc_for_pipe(dev, pipe);
1055 if (crtc == NULL) {
1056 DRM_ERROR("Invalid crtc %d\n", pipe);
1057 return -EINVAL;
1058 }
1059
1060 if (!crtc->enabled) {
1061 DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
1062 return -EBUSY;
1063 }
1064
1065 /* Helper routine in DRM core does all the work: */
1066 return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
1067 vblank_time, flags,
1068 crtc,
1069 &to_intel_crtc(crtc)->config.adjusted_mode);
1070 }
1071
1072 static bool intel_hpd_irq_event(struct drm_device *dev,
1073 struct drm_connector *connector)
1074 {
1075 enum drm_connector_status old_status;
1076
1077 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
1078 old_status = connector->status;
1079
1080 connector->status = connector->funcs->detect(connector, false);
1081 if (old_status == connector->status)
1082 return false;
1083
1084 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
1085 connector->base.id,
1086 connector->name,
1087 drm_get_connector_status_name(old_status),
1088 drm_get_connector_status_name(connector->status));
1089
1090 return true;
1091 }
1092
1093 static void i915_digport_work_func(struct work_struct *work)
1094 {
1095 struct drm_i915_private *dev_priv =
1096 container_of(work, struct drm_i915_private, dig_port_work);
1097 u32 long_port_mask, short_port_mask;
1098 struct intel_digital_port *intel_dig_port;
1099 int i, ret;
1100 u32 old_bits = 0;
1101
1102 spin_lock_irq(&dev_priv->irq_lock);
1103 long_port_mask = dev_priv->long_hpd_port_mask;
1104 dev_priv->long_hpd_port_mask = 0;
1105 short_port_mask = dev_priv->short_hpd_port_mask;
1106 dev_priv->short_hpd_port_mask = 0;
1107 spin_unlock_irq(&dev_priv->irq_lock);
1108
1109 for (i = 0; i < I915_MAX_PORTS; i++) {
1110 bool valid = false;
1111 bool long_hpd = false;
1112 intel_dig_port = dev_priv->hpd_irq_port[i];
1113 if (!intel_dig_port || !intel_dig_port->hpd_pulse)
1114 continue;
1115
1116 if (long_port_mask & (1 << i)) {
1117 valid = true;
1118 long_hpd = true;
1119 } else if (short_port_mask & (1 << i))
1120 valid = true;
1121
1122 if (valid) {
1123 ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
1124 if (ret == true) {
1125 /* if hpd_pulse() returned true, fall back to old school hpd */
1126 old_bits |= (1 << intel_dig_port->base.hpd_pin);
1127 }
1128 }
1129 }
1130
1131 if (old_bits) {
1132 spin_lock_irq(&dev_priv->irq_lock);
1133 dev_priv->hpd_event_bits |= old_bits;
1134 spin_unlock_irq(&dev_priv->irq_lock);
1135 schedule_work(&dev_priv->hotplug_work);
1136 }
1137 }
1138
1139 /*
1140 * Handle hotplug events outside the interrupt handler proper.
1141 */
1142 #define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)
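/* 2 minutes; the hotplug work below uses this to schedule
 * hotplug_reenable_work after an HPD storm, so storm-disabled pins
 * eventually get re-enabled. */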
1143
1144 static void i915_hotplug_work_func(struct work_struct *work)
1145 {
1146 struct drm_i915_private *dev_priv =
1147 container_of(work, struct drm_i915_private, hotplug_work);
1148 struct drm_device *dev = dev_priv->dev;
1149 struct drm_mode_config *mode_config = &dev->mode_config;
1150 struct intel_connector *intel_connector;
1151 struct intel_encoder *intel_encoder;
1152 struct drm_connector *connector;
1153 bool hpd_disabled = false;
1154 bool changed = false;
1155 u32 hpd_event_bits;
1156
1157 mutex_lock(&mode_config->mutex);
1158 DRM_DEBUG_KMS("running encoder hotplug functions\n");
1159
1160 spin_lock_irq(&dev_priv->irq_lock);
1161
1162 hpd_event_bits = dev_priv->hpd_event_bits;
1163 dev_priv->hpd_event_bits = 0;
1164 list_for_each_entry(connector, &mode_config->connector_list, head) {
1165 intel_connector = to_intel_connector(connector);
1166 if (!intel_connector->encoder)
1167 continue;
1168 intel_encoder = intel_connector->encoder;
1169 if (intel_encoder->hpd_pin > HPD_NONE &&
1170 dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
1171 connector->polled == DRM_CONNECTOR_POLL_HPD) {
1172 DRM_INFO("HPD interrupt storm detected on connector %s: "
1173 "switching from hotplug detection to polling\n",
1174 connector->name);
1175 dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
1176 connector->polled = DRM_CONNECTOR_POLL_CONNECT
1177 | DRM_CONNECTOR_POLL_DISCONNECT;
1178 hpd_disabled = true;
1179 }
1180 if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
1181 DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
1182 connector->name, intel_encoder->hpd_pin);
1183 }
1184 }
1185 /* If there were no outputs to poll, polling was disabled, so make
1186 * sure it gets re-enabled now that HPD is being disabled on some
1187 * connectors. */
1188 if (hpd_disabled) {
1189 drm_kms_helper_poll_enable(dev);
1190 mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work,
1191 msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
1192 }
1193
1194 spin_unlock_irq(&dev_priv->irq_lock);
1195
1196 list_for_each_entry(connector, &mode_config->connector_list, head) {
1197 intel_connector = to_intel_connector(connector);
1198 if (!intel_connector->encoder)
1199 continue;
1200 intel_encoder = intel_connector->encoder;
1201 if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
1202 if (intel_encoder->hot_plug)
1203 intel_encoder->hot_plug(intel_encoder);
1204 if (intel_hpd_irq_event(dev, connector))
1205 changed = true;
1206 }
1207 }
1208 mutex_unlock(&mode_config->mutex);
1209
1210 if (changed)
1211 drm_kms_helper_hotplug_event(dev);
1212 }
1213
1214 static void ironlake_rps_change_irq_handler(struct drm_device *dev)
1215 {
1216 struct drm_i915_private *dev_priv = dev->dev_private;
1217 u32 busy_up, busy_down, max_avg, min_avg;
1218 u8 new_delay;
1219
1220 spin_lock(&mchdev_lock);
1221
1222 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
1223
1224 new_delay = dev_priv->ips.cur_delay;
1225
1226 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
1227 busy_up = I915_READ(RCPREVBSYTUPAVG);
1228 busy_down = I915_READ(RCPREVBSYTDNAVG);
1229 max_avg = I915_READ(RCBMAXAVG);
1230 min_avg = I915_READ(RCBMINAVG);
1231
1232 /* Handle RCS change request from hw */
1233 if (busy_up > max_avg) {
1234 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
1235 new_delay = dev_priv->ips.cur_delay - 1;
1236 if (new_delay < dev_priv->ips.max_delay)
1237 new_delay = dev_priv->ips.max_delay;
1238 } else if (busy_down < min_avg) {
1239 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
1240 new_delay = dev_priv->ips.cur_delay + 1;
1241 if (new_delay > dev_priv->ips.min_delay)
1242 new_delay = dev_priv->ips.min_delay;
1243 }
1244
1245 if (ironlake_set_drps(dev, new_delay))
1246 dev_priv->ips.cur_delay = new_delay;
1247
1248 spin_unlock(&mchdev_lock);
1249
1250 return;
1251 }
1252
1253 static void notify_ring(struct drm_device *dev,
1254 struct intel_engine_cs *ring)
1255 {
1256 if (!intel_ring_initialized(ring))
1257 return;
1258
1259 trace_i915_gem_request_complete(ring);
1260
1261 if (drm_core_check_feature(dev, DRIVER_MODESET))
1262 intel_notify_mmio_flip(ring);
1263
1264 wake_up_all(&ring->irq_queue);
1265 i915_queue_hangcheck(dev);
1266 }
1267
1268 static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
1269 struct intel_rps_ei *rps_ei)
1270 {
1271 u32 cz_ts, cz_freq_khz;
1272 u32 render_count, media_count;
1273 u32 elapsed_render, elapsed_media, elapsed_time;
1274 u32 residency = 0;
1275
1276 cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
1277 cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4);
1278
1279 render_count = I915_READ(VLV_RENDER_C0_COUNT_REG);
1280 media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG);
1281
1282 if (rps_ei->cz_clock == 0) {
1283 rps_ei->cz_clock = cz_ts;
1284 rps_ei->render_c0 = render_count;
1285 rps_ei->media_c0 = media_count;
1286
1287 return dev_priv->rps.cur_freq;
1288 }
1289
1290 elapsed_time = cz_ts - rps_ei->cz_clock;
1291 rps_ei->cz_clock = cz_ts;
1292
1293 elapsed_render = render_count - rps_ei->render_c0;
1294 rps_ei->render_c0 = render_count;
1295
1296 elapsed_media = media_count - rps_ei->media_c0;
1297 rps_ei->media_c0 = media_count;
1298
1299 /* Convert all the counters into a common unit of milliseconds */
1300 elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC;
1301 elapsed_render /= cz_freq_khz;
1302 elapsed_media /= cz_freq_khz;
1303
1304 /*
1305 * Calculate the overall C0 residency percentage
1306 * only if the elapsed time is non-zero
1307 */
1308 if (elapsed_time) {
1309 residency =
1310 ((max(elapsed_render, elapsed_media) * 100)
1311 / elapsed_time);
1312 }
1313
1314 return residency;
1315 }
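/*
 * Net effect: vlv_c0_residency() returns
 * max(elapsed_render, elapsed_media) * 100 / elapsed_time, i.e. the busiest
 * power well's C0 time as a percentage of the elapsed CZ time over the
 * evaluation interval.
 */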
1316
1317 /**
1318 * vlv_calc_delay_from_C0_counters - Increase/Decrease freq based on GPU
1319 * busy-ness calculated from C0 counters of render & media power wells
1320 * @dev_priv: DRM device private
1321 *
1322 */
1323 static int vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
1324 {
1325 u32 residency_C0_up = 0, residency_C0_down = 0;
1326 int new_delay, adj;
1327
1328 dev_priv->rps.ei_interrupt_count++;
1329
1330 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
1331
1332
1333 if (dev_priv->rps.up_ei.cz_clock == 0) {
1334 vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei);
1335 vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei);
1336 return dev_priv->rps.cur_freq;
1337 }
1338
1339
1340 /*
1341 * To throttle down, C0 residency should be less than the down threshold
1342 * for continuous EI intervals. So calculate down EI counters
1343 * once in VLV_INT_COUNT_FOR_DOWN_EI
1344 */
1345 if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) {
1346
1347 dev_priv->rps.ei_interrupt_count = 0;
1348
1349 residency_C0_down = vlv_c0_residency(dev_priv,
1350 &dev_priv->rps.down_ei);
1351 } else {
1352 residency_C0_up = vlv_c0_residency(dev_priv,
1353 &dev_priv->rps.up_ei);
1354 }
1355
1356 new_delay = dev_priv->rps.cur_freq;
1357
1358 adj = dev_priv->rps.last_adj;
1359 /* C0 residency is greater than UP threshold. Increase Frequency */
1360 if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) {
1361 if (adj > 0)
1362 adj *= 2;
1363 else
1364 adj = 1;
1365
1366 if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)
1367 new_delay = dev_priv->rps.cur_freq + adj;
1368
1369 /*
1370 * For better performance, jump directly
1371 * to RPe if we're below it.
1372 */
1373 if (new_delay < dev_priv->rps.efficient_freq)
1374 new_delay = dev_priv->rps.efficient_freq;
1375
1376 } else if (!dev_priv->rps.ei_interrupt_count &&
1377 (residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) {
1378 if (adj < 0)
1379 adj *= 2;
1380 else
1381 adj = -1;
1382 /*
1383 * This means C0 residency is less than the down threshold over
1384 * a period of VLV_INT_COUNT_FOR_DOWN_EI, so reduce the frequency.
1385 */
1386 if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
1387 new_delay = dev_priv->rps.cur_freq + adj;
1388 }
1389
1390 return new_delay;
1391 }
1392
1393 static void gen6_pm_rps_work(struct work_struct *work)
1394 {
1395 struct drm_i915_private *dev_priv =
1396 container_of(work, struct drm_i915_private, rps.work);
1397 u32 pm_iir;
1398 int new_delay, adj;
1399
1400 spin_lock_irq(&dev_priv->irq_lock);
1401 pm_iir = dev_priv->rps.pm_iir;
1402 dev_priv->rps.pm_iir = 0;
1403 if (INTEL_INFO(dev_priv->dev)->gen >= 8)
1404 gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
1405 else {
1406 /* Make sure not to corrupt PMIMR state used by ringbuffer */
1407 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
1408 }
1409 spin_unlock_irq(&dev_priv->irq_lock);
1410
1411 /* Make sure we didn't queue anything we're not going to process. */
1412 WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
1413
1414 if ((pm_iir & dev_priv->pm_rps_events) == 0)
1415 return;
1416
1417 mutex_lock(&dev_priv->rps.hw_lock);
1418
1419 adj = dev_priv->rps.last_adj;
1420 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
1421 if (adj > 0)
1422 adj *= 2;
1423 else {
1424 /* CHV needs even encode values */
1425 adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1;
1426 }
1427 new_delay = dev_priv->rps.cur_freq + adj;
1428
1429 /*
1430 * For better performance, jump directly
1431 * to RPe if we're below it.
1432 */
1433 if (new_delay < dev_priv->rps.efficient_freq)
1434 new_delay = dev_priv->rps.efficient_freq;
1435 } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
1436 if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
1437 new_delay = dev_priv->rps.efficient_freq;
1438 else
1439 new_delay = dev_priv->rps.min_freq_softlimit;
1440 adj = 0;
1441 } else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
1442 new_delay = vlv_calc_delay_from_C0_counters(dev_priv);
1443 } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
1444 if (adj < 0)
1445 adj *= 2;
1446 else {
1447 /* CHV needs even encode values */
1448 adj = IS_CHERRYVIEW(dev_priv->dev) ? -2 : -1;
1449 }
1450 new_delay = dev_priv->rps.cur_freq + adj;
1451 } else { /* unknown event */
1452 new_delay = dev_priv->rps.cur_freq;
1453 }
1454
1455 /* sysfs frequency interfaces may have snuck in while servicing the
1456 * interrupt
1457 */
1458 new_delay = clamp_t(int, new_delay,
1459 dev_priv->rps.min_freq_softlimit,
1460 dev_priv->rps.max_freq_softlimit);
1461
1462 dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;
1463
1464 if (IS_VALLEYVIEW(dev_priv->dev))
1465 valleyview_set_rps(dev_priv->dev, new_delay);
1466 else
1467 gen6_set_rps(dev_priv->dev, new_delay);
1468
1469 mutex_unlock(&dev_priv->rps.hw_lock);
1470 }
1471
1472
1473 /**
1474 * ivybridge_parity_work - Workqueue called when a parity error interrupt
1475 * occurred.
1476 * @work: workqueue struct
1477 *
1478 * Doesn't actually do anything except notify userspace. As a consequence of
1479 * this event, userspace should try to remap the bad rows since,
1480 * statistically, the same row is more likely to go bad again.
1481 */
1482 static void ivybridge_parity_work(struct work_struct *work)
1483 {
1484 struct drm_i915_private *dev_priv =
1485 container_of(work, struct drm_i915_private, l3_parity.error_work);
1486 u32 error_status, row, bank, subbank;
1487 char *parity_event[6];
1488 uint32_t misccpctl;
1489 uint8_t slice = 0;
1490
1491 /* We must turn off DOP level clock gating to access the L3 registers.
1492 * In order to prevent a get/put style interface, acquire struct mutex
1493 * any time we access those registers.
1494 */
1495 mutex_lock(&dev_priv->dev->struct_mutex);
1496
1497 /* If we've screwed up tracking, just let the interrupt fire again */
1498 if (WARN_ON(!dev_priv->l3_parity.which_slice))
1499 goto out;
1500
1501 misccpctl = I915_READ(GEN7_MISCCPCTL);
1502 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
1503 POSTING_READ(GEN7_MISCCPCTL);
1504
1505 while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1506 u32 reg;
1507
1508 slice--;
1509 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
1510 break;
1511
1512 dev_priv->l3_parity.which_slice &= ~(1<<slice);
1513
1514 reg = GEN7_L3CDERRST1 + (slice * 0x200);
1515
1516 error_status = I915_READ(reg);
1517 row = GEN7_PARITY_ERROR_ROW(error_status);
1518 bank = GEN7_PARITY_ERROR_BANK(error_status);
1519 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
1520
1521 I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
1522 POSTING_READ(reg);
1523
1524 parity_event[0] = I915_L3_PARITY_UEVENT "=1";
1525 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
1526 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
1527 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
1528 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
1529 parity_event[5] = NULL;
1530
1531 kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
1532 KOBJ_CHANGE, parity_event);
1533
1534 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
1535 slice, row, bank, subbank);
1536
1537 kfree(parity_event[4]);
1538 kfree(parity_event[3]);
1539 kfree(parity_event[2]);
1540 kfree(parity_event[1]);
1541 }
1542
1543 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
1544
1545 out:
1546 WARN_ON(dev_priv->l3_parity.which_slice);
1547 spin_lock_irq(&dev_priv->irq_lock);
1548 gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
1549 spin_unlock_irq(&dev_priv->irq_lock);
1550
1551 mutex_unlock(&dev_priv->dev->struct_mutex);
1552 }
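/*
 * Userspace consumes the uevent above and is expected to rewrite the L3
 * remapping table itself (typically via the i915 l3_parity sysfs attribute);
 * nothing is remapped from within the kernel here. (Note on the userspace
 * side is an assumption based on the uevent payload, not on code in this
 * file.)
 */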
1553
1554 static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
1555 {
1556 struct drm_i915_private *dev_priv = dev->dev_private;
1557
1558 if (!HAS_L3_DPF(dev))
1559 return;
1560
1561 spin_lock(&dev_priv->irq_lock);
1562 gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
1563 spin_unlock(&dev_priv->irq_lock);
1564
1565 iir &= GT_PARITY_ERROR(dev);
1566 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
1567 dev_priv->l3_parity.which_slice |= 1 << 1;
1568
1569 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
1570 dev_priv->l3_parity.which_slice |= 1 << 0;
1571
1572 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
1573 }
1574
1575 static void ilk_gt_irq_handler(struct drm_device *dev,
1576 struct drm_i915_private *dev_priv,
1577 u32 gt_iir)
1578 {
1579 if (gt_iir &
1580 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1581 notify_ring(dev, &dev_priv->ring[RCS]);
1582 if (gt_iir & ILK_BSD_USER_INTERRUPT)
1583 notify_ring(dev, &dev_priv->ring[VCS]);
1584 }
1585
1586 static void snb_gt_irq_handler(struct drm_device *dev,
1587 struct drm_i915_private *dev_priv,
1588 u32 gt_iir)
1589 {
1590
1591 if (gt_iir &
1592 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1593 notify_ring(dev, &dev_priv->ring[RCS]);
1594 if (gt_iir & GT_BSD_USER_INTERRUPT)
1595 notify_ring(dev, &dev_priv->ring[VCS]);
1596 if (gt_iir & GT_BLT_USER_INTERRUPT)
1597 notify_ring(dev, &dev_priv->ring[BCS]);
1598
1599 if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
1600 GT_BSD_CS_ERROR_INTERRUPT |
1601 GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
1602 i915_handle_error(dev, false, "GT error interrupt 0x%08x",
1603 gt_iir);
1604 }
1605
1606 if (gt_iir & GT_PARITY_ERROR(dev))
1607 ivybridge_parity_error_irq_handler(dev, gt_iir);
1608 }
1609
1610 static void gen8_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1611 {
1612 if ((pm_iir & dev_priv->pm_rps_events) == 0)
1613 return;
1614
1615 spin_lock(&dev_priv->irq_lock);
1616 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
1617 gen8_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1618 spin_unlock(&dev_priv->irq_lock);
1619
1620 queue_work(dev_priv->wq, &dev_priv->rps.work);
1621 }
1622
1623 static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
1624 struct drm_i915_private *dev_priv,
1625 u32 master_ctl)
1626 {
1627 struct intel_engine_cs *ring;
1628 u32 rcs, bcs, vcs;
1629 uint32_t tmp = 0;
1630 irqreturn_t ret = IRQ_NONE;
1631
1632 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1633 tmp = I915_READ(GEN8_GT_IIR(0));
1634 if (tmp) {
1635 I915_WRITE(GEN8_GT_IIR(0), tmp);
1636 ret = IRQ_HANDLED;
1637
1638 rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
1639 ring = &dev_priv->ring[RCS];
1640 if (rcs & GT_RENDER_USER_INTERRUPT)
1641 notify_ring(dev, ring);
1642 if (rcs & GT_CONTEXT_SWITCH_INTERRUPT)
1643 intel_execlists_handle_ctx_events(ring);
1644
1645 bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
1646 ring = &dev_priv->ring[BCS];
1647 if (bcs & GT_RENDER_USER_INTERRUPT)
1648 notify_ring(dev, ring);
1649 if (bcs & GT_CONTEXT_SWITCH_INTERRUPT)
1650 intel_execlists_handle_ctx_events(ring);
1651 } else
1652 DRM_ERROR("The master control interrupt lied (GT0)!\n");
1653 }
1654
1655 if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
1656 tmp = I915_READ(GEN8_GT_IIR(1));
1657 if (tmp) {
1658 I915_WRITE(GEN8_GT_IIR(1), tmp);
1659 ret = IRQ_HANDLED;
1660
1661 vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
1662 ring = &dev_priv->ring[VCS];
1663 if (vcs & GT_RENDER_USER_INTERRUPT)
1664 notify_ring(dev, ring);
1665 if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
1666 intel_execlists_handle_ctx_events(ring);
1667
1668 vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
1669 ring = &dev_priv->ring[VCS2];
1670 if (vcs & GT_RENDER_USER_INTERRUPT)
1671 notify_ring(dev, ring);
1672 if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
1673 intel_execlists_handle_ctx_events(ring);
1674 } else
1675 DRM_ERROR("The master control interrupt lied (GT1)!\n");
1676 }
1677
1678 if (master_ctl & GEN8_GT_PM_IRQ) {
1679 tmp = I915_READ(GEN8_GT_IIR(2));
1680 if (tmp & dev_priv->pm_rps_events) {
1681 I915_WRITE(GEN8_GT_IIR(2),
1682 tmp & dev_priv->pm_rps_events);
1683 ret = IRQ_HANDLED;
1684 gen8_rps_irq_handler(dev_priv, tmp);
1685 } else
1686 DRM_ERROR("The master control interrupt lied (PM)!\n");
1687 }
1688
1689 if (master_ctl & GEN8_GT_VECS_IRQ) {
1690 tmp = I915_READ(GEN8_GT_IIR(3));
1691 if (tmp) {
1692 I915_WRITE(GEN8_GT_IIR(3), tmp);
1693 ret = IRQ_HANDLED;
1694
1695 vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
1696 ring = &dev_priv->ring[VECS];
1697 if (vcs & GT_RENDER_USER_INTERRUPT)
1698 notify_ring(dev, ring);
1699 if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
1700 intel_execlists_handle_ctx_events(ring);
1701 } else
1702 DRM_ERROR("The master control interrupt lied (GT3)!\n");
1703 }
1704
1705 return ret;
1706 }
1707
1708 #define HPD_STORM_DETECT_PERIOD 1000
1709 #define HPD_STORM_THRESHOLD 5
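/*
 * Storm detection: if more than HPD_STORM_THRESHOLD interrupts arrive on a
 * pin within HPD_STORM_DETECT_PERIOD ms, the pin is marked HPD_MARK_DISABLED
 * and the hotplug work switches the connector over to polling; see
 * intel_hpd_irq_handler() below and i915_hotplug_work_func() above.
 */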
1710
1711 static int ilk_port_to_hotplug_shift(enum port port)
1712 {
1713 switch (port) {
1714 case PORT_A:
1715 case PORT_E:
1716 default:
1717 return -1;
1718 case PORT_B:
1719 return 0;
1720 case PORT_C:
1721 return 8;
1722 case PORT_D:
1723 return 16;
1724 }
1725 }
1726
1727 static int g4x_port_to_hotplug_shift(enum port port)
1728 {
1729 switch (port) {
1730 case PORT_A:
1731 case PORT_E:
1732 default:
1733 return -1;
1734 case PORT_B:
1735 return 17;
1736 case PORT_C:
1737 return 19;
1738 case PORT_D:
1739 return 21;
1740 }
1741 }
1742
1743 static inline enum port get_port_from_pin(enum hpd_pin pin)
1744 {
1745 switch (pin) {
1746 case HPD_PORT_B:
1747 return PORT_B;
1748 case HPD_PORT_C:
1749 return PORT_C;
1750 case HPD_PORT_D:
1751 return PORT_D;
1752 default:
1753 return PORT_A; /* no hpd */
1754 }
1755 }
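/*
 * Long vs. short pulses: a long HPD pulse normally signals a plug/unplug
 * that needs a full connector re-detect, while a short pulse is a sink IRQ
 * (e.g. a DP sink event) that the encoder's hpd_pulse() hook handles
 * directly; hpd_pulse() returning true falls back to the regular hotplug
 * path (see i915_digport_work_func()).
 */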
1756
1757 static inline void intel_hpd_irq_handler(struct drm_device *dev,
1758 u32 hotplug_trigger,
1759 u32 dig_hotplug_reg,
1760 const u32 *hpd)
1761 {
1762 struct drm_i915_private *dev_priv = dev->dev_private;
1763 int i;
1764 enum port port;
1765 bool storm_detected = false;
1766 bool queue_dig = false, queue_hp = false;
1767 u32 dig_shift;
1768 u32 dig_port_mask = 0;
1769
1770 if (!hotplug_trigger)
1771 return;
1772
1773 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
1774 hotplug_trigger, dig_hotplug_reg);
1775
1776 spin_lock(&dev_priv->irq_lock);
1777 for (i = 1; i < HPD_NUM_PINS; i++) {
1778 if (!(hpd[i] & hotplug_trigger))
1779 continue;
1780
1781 port = get_port_from_pin(i);
1782 if (port && dev_priv->hpd_irq_port[port]) {
1783 bool long_hpd;
1784
1785 if (IS_G4X(dev)) {
1786 dig_shift = g4x_port_to_hotplug_shift(port);
1787 long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
1788 } else {
1789 dig_shift = ilk_port_to_hotplug_shift(port);
1790 long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
1791 }
1792
1793 DRM_DEBUG_DRIVER("digital hpd port %c - %s\n",
1794 port_name(port),
1795 long_hpd ? "long" : "short");
 1796 			/* For long HPD pulses we want the digital queue to run,
 1797 			 * but we still want HPD storm detection to function. */
1798 if (long_hpd) {
1799 dev_priv->long_hpd_port_mask |= (1 << port);
1800 dig_port_mask |= hpd[i];
1801 } else {
1802 /* for short HPD just trigger the digital queue */
1803 dev_priv->short_hpd_port_mask |= (1 << port);
1804 hotplug_trigger &= ~hpd[i];
1805 }
1806 queue_dig = true;
1807 }
1808 }
1809
1810 for (i = 1; i < HPD_NUM_PINS; i++) {
1811 if (hpd[i] & hotplug_trigger &&
1812 dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
1813 /*
1814 * On GMCH platforms the interrupt mask bits only
1815 * prevent irq generation, not the setting of the
 1816 			 * hotplug bits themselves. So only WARN about unexpected
1817 * interrupts on saner platforms.
1818 */
1819 WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
1820 "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
1821 hotplug_trigger, i, hpd[i]);
1822
1823 continue;
1824 }
1825
1826 if (!(hpd[i] & hotplug_trigger) ||
1827 dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
1828 continue;
1829
1830 if (!(dig_port_mask & hpd[i])) {
1831 dev_priv->hpd_event_bits |= (1 << i);
1832 queue_hp = true;
1833 }
1834
1835 if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
1836 dev_priv->hpd_stats[i].hpd_last_jiffies
1837 + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
1838 dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
1839 dev_priv->hpd_stats[i].hpd_cnt = 0;
1840 DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
1841 } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
1842 dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
1843 dev_priv->hpd_event_bits &= ~(1 << i);
1844 DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
1845 storm_detected = true;
1846 } else {
1847 dev_priv->hpd_stats[i].hpd_cnt++;
1848 DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
1849 dev_priv->hpd_stats[i].hpd_cnt);
1850 }
1851 }
1852
1853 if (storm_detected)
1854 dev_priv->display.hpd_irq_setup(dev);
1855 spin_unlock(&dev_priv->irq_lock);
1856
1857 /*
1858 * Our hotplug handler can grab modeset locks (by calling down into the
 1859 	 * fb helpers). Hence it must not be run on our own dev_priv->wq work
1860 * queue for otherwise the flush_work in the pageflip code will
1861 * deadlock.
1862 */
1863 if (queue_dig)
1864 queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work);
1865 if (queue_hp)
1866 schedule_work(&dev_priv->hotplug_work);
1867 }
1868
1869 static void gmbus_irq_handler(struct drm_device *dev)
1870 {
1871 struct drm_i915_private *dev_priv = dev->dev_private;
1872
1873 wake_up_all(&dev_priv->gmbus_wait_queue);
1874 }
1875
1876 static void dp_aux_irq_handler(struct drm_device *dev)
1877 {
1878 struct drm_i915_private *dev_priv = dev->dev_private;
1879
1880 wake_up_all(&dev_priv->gmbus_wait_queue);
1881 }
1882
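/*
 * Push one set of CRC results into the per-pipe circular buffer that is
 * consumed through debugfs. The event is dropped with an error if CRC
 * capture has not been armed (no buffer allocated) or the buffer is full.
 */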
1883 #if defined(CONFIG_DEBUG_FS)
1884 static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1885 uint32_t crc0, uint32_t crc1,
1886 uint32_t crc2, uint32_t crc3,
1887 uint32_t crc4)
1888 {
1889 struct drm_i915_private *dev_priv = dev->dev_private;
1890 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
1891 struct intel_pipe_crc_entry *entry;
1892 int head, tail;
1893
1894 spin_lock(&pipe_crc->lock);
1895
1896 if (!pipe_crc->entries) {
1897 spin_unlock(&pipe_crc->lock);
1898 DRM_ERROR("spurious interrupt\n");
1899 return;
1900 }
1901
1902 head = pipe_crc->head;
1903 tail = pipe_crc->tail;
1904
1905 if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
1906 spin_unlock(&pipe_crc->lock);
1907 DRM_ERROR("CRC buffer overflowing\n");
1908 return;
1909 }
1910
1911 entry = &pipe_crc->entries[head];
1912
1913 entry->frame = dev->driver->get_vblank_counter(dev, pipe);
1914 entry->crc[0] = crc0;
1915 entry->crc[1] = crc1;
1916 entry->crc[2] = crc2;
1917 entry->crc[3] = crc3;
1918 entry->crc[4] = crc4;
1919
1920 head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
1921 pipe_crc->head = head;
1922
1923 spin_unlock(&pipe_crc->lock);
1924
1925 wake_up_interruptible(&pipe_crc->wq);
1926 }
1927 #else
1928 static inline void
1929 display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1930 uint32_t crc0, uint32_t crc1,
1931 uint32_t crc2, uint32_t crc3,
1932 uint32_t crc4) {}
1933 #endif
1934
1935
1936 static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1937 {
1938 struct drm_i915_private *dev_priv = dev->dev_private;
1939
1940 display_pipe_crc_irq_handler(dev, pipe,
1941 I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1942 0, 0, 0, 0);
1943 }
1944
1945 static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1946 {
1947 struct drm_i915_private *dev_priv = dev->dev_private;
1948
1949 display_pipe_crc_irq_handler(dev, pipe,
1950 I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1951 I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1952 I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1953 I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
1954 I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1955 }
1956
1957 static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1958 {
1959 struct drm_i915_private *dev_priv = dev->dev_private;
1960 uint32_t res1, res2;
1961
1962 if (INTEL_INFO(dev)->gen >= 3)
1963 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1964 else
1965 res1 = 0;
1966
1967 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
1968 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1969 else
1970 res2 = 0;
1971
1972 display_pipe_crc_irq_handler(dev, pipe,
1973 I915_READ(PIPE_CRC_RES_RED(pipe)),
1974 I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1975 I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1976 res1, res2);
1977 }
1978
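/*
 * BDW software turbo hook, called on page-flip interrupts: if a flip has
 * already been seen, push the flip timer out; otherwise arm the timer and
 * note that a flip has been received. Then run the software turbo
 * evaluation.
 */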
1979 void gen8_flip_interrupt(struct drm_device *dev)
1980 {
1981 struct drm_i915_private *dev_priv = dev->dev_private;
1982
1983 if (!dev_priv->rps.is_bdw_sw_turbo)
1984 return;
1985
 1986 	if (atomic_read(&dev_priv->rps.sw_turbo.flip_received)) {
1987 mod_timer(&dev_priv->rps.sw_turbo.flip_timer,
1988 usecs_to_jiffies(dev_priv->rps.sw_turbo.timeout) + jiffies);
1989 }
1990 else {
1991 dev_priv->rps.sw_turbo.flip_timer.expires =
1992 usecs_to_jiffies(dev_priv->rps.sw_turbo.timeout) + jiffies;
1993 add_timer(&dev_priv->rps.sw_turbo.flip_timer);
1994 atomic_set(&dev_priv->rps.sw_turbo.flip_received, true);
1995 }
1996
1997 bdw_software_turbo(dev);
1998 }
1999
2000 /* The RPS events need forcewake, so we add them to a work queue and mask their
2001 * IMR bits until the work is done. Other interrupts can be processed without
2002 * the work queue. */
2003 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
2004 {
2005 if (pm_iir & dev_priv->pm_rps_events) {
2006 spin_lock(&dev_priv->irq_lock);
2007 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
2008 gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
2009 spin_unlock(&dev_priv->irq_lock);
2010
2011 queue_work(dev_priv->wq, &dev_priv->rps.work);
2012 }
2013
2014 if (HAS_VEBOX(dev_priv->dev)) {
2015 if (pm_iir & PM_VEBOX_USER_INTERRUPT)
2016 notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
2017
2018 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
2019 i915_handle_error(dev_priv->dev, false,
2020 "VEBOX CS error interrupt 0x%08x",
2021 pm_iir);
2022 }
2023 }
2024 }
2025
2026 static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
2027 {
2028 if (!drm_handle_vblank(dev, pipe))
2029 return false;
2030
2031 return true;
2032 }
2033
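/*
 * Read and clear the PIPESTAT registers for all pipes under irq_lock,
 * keeping only the status bits we actually want to handle, then dispatch
 * vblank/page-flip/CRC/underrun/GMBUS handling outside the lock.
 */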
2034 static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
2035 {
2036 struct drm_i915_private *dev_priv = dev->dev_private;
2037 u32 pipe_stats[I915_MAX_PIPES] = { };
2038 int pipe;
2039
2040 spin_lock(&dev_priv->irq_lock);
2041 for_each_pipe(dev_priv, pipe) {
2042 int reg;
2043 u32 mask, iir_bit = 0;
2044
2045 /*
2046 * PIPESTAT bits get signalled even when the interrupt is
2047 * disabled with the mask bits, and some of the status bits do
2048 * not generate interrupts at all (like the underrun bit). Hence
2049 * we need to be careful that we only handle what we want to
2050 * handle.
2051 */
2052 mask = 0;
2053 if (__cpu_fifo_underrun_reporting_enabled(dev, pipe))
2054 mask |= PIPE_FIFO_UNDERRUN_STATUS;
2055
2056 switch (pipe) {
2057 case PIPE_A:
2058 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
2059 break;
2060 case PIPE_B:
2061 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
2062 break;
2063 case PIPE_C:
2064 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
2065 break;
2066 }
2067 if (iir & iir_bit)
2068 mask |= dev_priv->pipestat_irq_mask[pipe];
2069
2070 if (!mask)
2071 continue;
2072
2073 reg = PIPESTAT(pipe);
2074 mask |= PIPESTAT_INT_ENABLE_MASK;
2075 pipe_stats[pipe] = I915_READ(reg) & mask;
2076
2077 /*
2078 * Clear the PIPE*STAT regs before the IIR
2079 */
2080 if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
2081 PIPESTAT_INT_STATUS_MASK))
2082 I915_WRITE(reg, pipe_stats[pipe]);
2083 }
2084 spin_unlock(&dev_priv->irq_lock);
2085
2086 for_each_pipe(dev_priv, pipe) {
2087 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
2088 intel_pipe_handle_vblank(dev, pipe))
2089 intel_check_page_flip(dev, pipe);
2090
2091 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
2092 intel_prepare_page_flip(dev, pipe);
2093 intel_finish_page_flip(dev, pipe);
2094 }
2095
2096 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
2097 i9xx_pipe_crc_irq_handler(dev, pipe);
2098
2099 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
2100 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
2101 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
2102 }
2103
2104 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
2105 gmbus_irq_handler(dev);
2106 }
2107
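/*
 * Handle hotplug events signalled through PORT_HOTPLUG_STAT: ack the
 * status register before clearing IIR, hand the triggered bits to the
 * shared HPD handler and service DP AUX completions on g4x/vlv.
 */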
2108 static void i9xx_hpd_irq_handler(struct drm_device *dev)
2109 {
2110 struct drm_i915_private *dev_priv = dev->dev_private;
2111 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
2112
2113 if (hotplug_status) {
2114 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2115 /*
2116 * Make sure hotplug status is cleared before we clear IIR, or else we
2117 * may miss hotplug events.
2118 */
2119 POSTING_READ(PORT_HOTPLUG_STAT);
2120
2121 if (IS_G4X(dev)) {
2122 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
2123
2124 intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x);
2125 } else {
2126 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
2127
2128 intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915);
2129 }
2130
2131 if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
2132 hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
2133 dp_aux_irq_handler(dev);
2134 }
2135 }
2136
2137 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
2138 {
2139 struct drm_device *dev = arg;
2140 struct drm_i915_private *dev_priv = dev->dev_private;
2141 u32 iir, gt_iir, pm_iir;
2142 irqreturn_t ret = IRQ_NONE;
2143
2144 while (true) {
2145 /* Find, clear, then process each source of interrupt */
2146
2147 gt_iir = I915_READ(GTIIR);
2148 if (gt_iir)
2149 I915_WRITE(GTIIR, gt_iir);
2150
2151 pm_iir = I915_READ(GEN6_PMIIR);
2152 if (pm_iir)
2153 I915_WRITE(GEN6_PMIIR, pm_iir);
2154
2155 iir = I915_READ(VLV_IIR);
2156 if (iir) {
2157 /* Consume port before clearing IIR or we'll miss events */
2158 if (iir & I915_DISPLAY_PORT_INTERRUPT)
2159 i9xx_hpd_irq_handler(dev);
2160 I915_WRITE(VLV_IIR, iir);
2161 }
2162
2163 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
2164 goto out;
2165
2166 ret = IRQ_HANDLED;
2167
2168 if (gt_iir)
2169 snb_gt_irq_handler(dev, dev_priv, gt_iir);
2170 if (pm_iir)
2171 gen6_rps_irq_handler(dev_priv, pm_iir);
2172 /* Call regardless, as some status bits might not be
2173 * signalled in iir */
2174 valleyview_pipestat_irq_handler(dev, iir);
2175 }
2176
2177 out:
2178 return ret;
2179 }
2180
2181 static irqreturn_t cherryview_irq_handler(int irq, void *arg)
2182 {
2183 struct drm_device *dev = arg;
2184 struct drm_i915_private *dev_priv = dev->dev_private;
2185 u32 master_ctl, iir;
2186 irqreturn_t ret = IRQ_NONE;
2187
2188 for (;;) {
2189 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
2190 iir = I915_READ(VLV_IIR);
2191
2192 if (master_ctl == 0 && iir == 0)
2193 break;
2194
2195 ret = IRQ_HANDLED;
2196
2197 I915_WRITE(GEN8_MASTER_IRQ, 0);
2198
2199 /* Find, clear, then process each source of interrupt */
2200
2201 if (iir) {
2202 /* Consume port before clearing IIR or we'll miss events */
2203 if (iir & I915_DISPLAY_PORT_INTERRUPT)
2204 i9xx_hpd_irq_handler(dev);
2205 I915_WRITE(VLV_IIR, iir);
2206 }
2207
2208 gen8_gt_irq_handler(dev, dev_priv, master_ctl);
2209
2210 /* Call regardless, as some status bits might not be
2211 * signalled in iir */
2212 valleyview_pipestat_irq_handler(dev, iir);
2213
2214 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
2215 POSTING_READ(GEN8_MASTER_IRQ);
2216 }
2217
2218 return ret;
2219 }
2220
2221 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
2222 {
2223 struct drm_i915_private *dev_priv = dev->dev_private;
2224 int pipe;
2225 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
2226 u32 dig_hotplug_reg;
2227
2228 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2229 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2230
2231 intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx);
2232
2233 if (pch_iir & SDE_AUDIO_POWER_MASK) {
2234 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
2235 SDE_AUDIO_POWER_SHIFT);
 2236 		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2237 port_name(port));
2238 }
2239
2240 if (pch_iir & SDE_AUX_MASK)
2241 dp_aux_irq_handler(dev);
2242
2243 if (pch_iir & SDE_GMBUS)
2244 gmbus_irq_handler(dev);
2245
2246 if (pch_iir & SDE_AUDIO_HDCP_MASK)
2247 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
2248
2249 if (pch_iir & SDE_AUDIO_TRANS_MASK)
2250 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
2251
2252 if (pch_iir & SDE_POISON)
2253 DRM_ERROR("PCH poison interrupt\n");
2254
2255 if (pch_iir & SDE_FDI_MASK)
2256 for_each_pipe(dev_priv, pipe)
2257 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
2258 pipe_name(pipe),
2259 I915_READ(FDI_RX_IIR(pipe)));
2260
2261 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
2262 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
2263
2264 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
2265 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
2266
2267 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
2268 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
2269 false))
2270 DRM_ERROR("PCH transcoder A FIFO underrun\n");
2271
2272 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
2273 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
2274 false))
2275 DRM_ERROR("PCH transcoder B FIFO underrun\n");
2276 }
2277
2278 static void ivb_err_int_handler(struct drm_device *dev)
2279 {
2280 struct drm_i915_private *dev_priv = dev->dev_private;
2281 u32 err_int = I915_READ(GEN7_ERR_INT);
2282 enum pipe pipe;
2283
2284 if (err_int & ERR_INT_POISON)
2285 DRM_ERROR("Poison interrupt\n");
2286
2287 for_each_pipe(dev_priv, pipe) {
2288 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
2289 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
2290 false))
2291 DRM_ERROR("Pipe %c FIFO underrun\n",
2292 pipe_name(pipe));
2293 }
2294
2295 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
2296 if (IS_IVYBRIDGE(dev))
2297 ivb_pipe_crc_irq_handler(dev, pipe);
2298 else
2299 hsw_pipe_crc_irq_handler(dev, pipe);
2300 }
2301 }
2302
2303 I915_WRITE(GEN7_ERR_INT, err_int);
2304 }
2305
2306 static void cpt_serr_int_handler(struct drm_device *dev)
2307 {
2308 struct drm_i915_private *dev_priv = dev->dev_private;
2309 u32 serr_int = I915_READ(SERR_INT);
2310
2311 if (serr_int & SERR_INT_POISON)
2312 DRM_ERROR("PCH poison interrupt\n");
2313
2314 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
2315 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
2316 false))
2317 DRM_ERROR("PCH transcoder A FIFO underrun\n");
2318
2319 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
2320 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
2321 false))
2322 DRM_ERROR("PCH transcoder B FIFO underrun\n");
2323
2324 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
2325 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
2326 false))
2327 DRM_ERROR("PCH transcoder C FIFO underrun\n");
2328
2329 I915_WRITE(SERR_INT, serr_int);
2330 }
2331
2332 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
2333 {
2334 struct drm_i915_private *dev_priv = dev->dev_private;
2335 int pipe;
2336 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
2337 u32 dig_hotplug_reg;
2338
2339 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2340 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2341
2342 intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt);
2343
2344 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
2345 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
2346 SDE_AUDIO_POWER_SHIFT_CPT);
2347 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2348 port_name(port));
2349 }
2350
2351 if (pch_iir & SDE_AUX_MASK_CPT)
2352 dp_aux_irq_handler(dev);
2353
2354 if (pch_iir & SDE_GMBUS_CPT)
2355 gmbus_irq_handler(dev);
2356
2357 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
2358 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
2359
2360 if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
2361 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
2362
2363 if (pch_iir & SDE_FDI_MASK_CPT)
2364 for_each_pipe(dev_priv, pipe)
2365 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
2366 pipe_name(pipe),
2367 I915_READ(FDI_RX_IIR(pipe)));
2368
2369 if (pch_iir & SDE_ERROR_CPT)
2370 cpt_serr_int_handler(dev);
2371 }
2372
2373 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
2374 {
2375 struct drm_i915_private *dev_priv = dev->dev_private;
2376 enum pipe pipe;
2377
2378 if (de_iir & DE_AUX_CHANNEL_A)
2379 dp_aux_irq_handler(dev);
2380
2381 if (de_iir & DE_GSE)
2382 intel_opregion_asle_intr(dev);
2383
2384 if (de_iir & DE_POISON)
2385 DRM_ERROR("Poison interrupt\n");
2386
2387 for_each_pipe(dev_priv, pipe) {
2388 if (de_iir & DE_PIPE_VBLANK(pipe) &&
2389 intel_pipe_handle_vblank(dev, pipe))
2390 intel_check_page_flip(dev, pipe);
2391
2392 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2393 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
2394 DRM_ERROR("Pipe %c FIFO underrun\n",
2395 pipe_name(pipe));
2396
2397 if (de_iir & DE_PIPE_CRC_DONE(pipe))
2398 i9xx_pipe_crc_irq_handler(dev, pipe);
2399
2400 /* plane/pipes map 1:1 on ilk+ */
2401 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
2402 intel_prepare_page_flip(dev, pipe);
2403 intel_finish_page_flip_plane(dev, pipe);
2404 }
2405 }
2406
2407 /* check event from PCH */
2408 if (de_iir & DE_PCH_EVENT) {
2409 u32 pch_iir = I915_READ(SDEIIR);
2410
2411 if (HAS_PCH_CPT(dev))
2412 cpt_irq_handler(dev, pch_iir);
2413 else
2414 ibx_irq_handler(dev, pch_iir);
2415
 2416 		/* should clear PCH hotplug event before clearing the CPU irq */
2417 I915_WRITE(SDEIIR, pch_iir);
2418 }
2419
2420 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
2421 ironlake_rps_change_irq_handler(dev);
2422 }
2423
2424 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
2425 {
2426 struct drm_i915_private *dev_priv = dev->dev_private;
2427 enum pipe pipe;
2428
2429 if (de_iir & DE_ERR_INT_IVB)
2430 ivb_err_int_handler(dev);
2431
2432 if (de_iir & DE_AUX_CHANNEL_A_IVB)
2433 dp_aux_irq_handler(dev);
2434
2435 if (de_iir & DE_GSE_IVB)
2436 intel_opregion_asle_intr(dev);
2437
2438 for_each_pipe(dev_priv, pipe) {
2439 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
2440 intel_pipe_handle_vblank(dev, pipe))
2441 intel_check_page_flip(dev, pipe);
2442
2443 /* plane/pipes map 1:1 on ilk+ */
2444 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
2445 intel_prepare_page_flip(dev, pipe);
2446 intel_finish_page_flip_plane(dev, pipe);
2447 }
2448 }
2449
2450 /* check event from PCH */
2451 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
2452 u32 pch_iir = I915_READ(SDEIIR);
2453
2454 cpt_irq_handler(dev, pch_iir);
2455
 2456 		/* clear PCH hotplug event before clearing the CPU irq */
2457 I915_WRITE(SDEIIR, pch_iir);
2458 }
2459 }
2460
2461 /*
2462 * To handle irqs with the minimum potential races with fresh interrupts, we:
2463 * 1 - Disable Master Interrupt Control.
2464 * 2 - Find the source(s) of the interrupt.
2465 * 3 - Clear the Interrupt Identity bits (IIR).
2466 * 4 - Process the interrupt(s) that had bits set in the IIRs.
2467 * 5 - Re-enable Master Interrupt Control.
2468 */
2469 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2470 {
2471 struct drm_device *dev = arg;
2472 struct drm_i915_private *dev_priv = dev->dev_private;
2473 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2474 irqreturn_t ret = IRQ_NONE;
2475
2476 /* We get interrupts on unclaimed registers, so check for this before we
2477 * do any I915_{READ,WRITE}. */
2478 intel_uncore_check_errors(dev);
2479
2480 /* disable master interrupt before clearing iir */
2481 de_ier = I915_READ(DEIER);
2482 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2483 POSTING_READ(DEIER);
2484
2485 /* Disable south interrupts. We'll only write to SDEIIR once, so further
 2486 	 * interrupts will be stored on its back queue, and then we'll be
2487 * able to process them after we restore SDEIER (as soon as we restore
2488 * it, we'll get an interrupt if SDEIIR still has something to process
2489 * due to its back queue). */
2490 if (!HAS_PCH_NOP(dev)) {
2491 sde_ier = I915_READ(SDEIER);
2492 I915_WRITE(SDEIER, 0);
2493 POSTING_READ(SDEIER);
2494 }
2495
2496 /* Find, clear, then process each source of interrupt */
2497
2498 gt_iir = I915_READ(GTIIR);
2499 if (gt_iir) {
2500 I915_WRITE(GTIIR, gt_iir);
2501 ret = IRQ_HANDLED;
2502 if (INTEL_INFO(dev)->gen >= 6)
2503 snb_gt_irq_handler(dev, dev_priv, gt_iir);
2504 else
2505 ilk_gt_irq_handler(dev, dev_priv, gt_iir);
2506 }
2507
2508 de_iir = I915_READ(DEIIR);
2509 if (de_iir) {
2510 I915_WRITE(DEIIR, de_iir);
2511 ret = IRQ_HANDLED;
2512 if (INTEL_INFO(dev)->gen >= 7)
2513 ivb_display_irq_handler(dev, de_iir);
2514 else
2515 ilk_display_irq_handler(dev, de_iir);
2516 }
2517
2518 if (INTEL_INFO(dev)->gen >= 6) {
2519 u32 pm_iir = I915_READ(GEN6_PMIIR);
2520 if (pm_iir) {
2521 I915_WRITE(GEN6_PMIIR, pm_iir);
2522 ret = IRQ_HANDLED;
2523 gen6_rps_irq_handler(dev_priv, pm_iir);
2524 }
2525 }
2526
2527 I915_WRITE(DEIER, de_ier);
2528 POSTING_READ(DEIER);
2529 if (!HAS_PCH_NOP(dev)) {
2530 I915_WRITE(SDEIER, sde_ier);
2531 POSTING_READ(SDEIER);
2532 }
2533
2534 return ret;
2535 }
2536
2537 static irqreturn_t gen8_irq_handler(int irq, void *arg)
2538 {
2539 struct drm_device *dev = arg;
2540 struct drm_i915_private *dev_priv = dev->dev_private;
2541 u32 master_ctl;
2542 irqreturn_t ret = IRQ_NONE;
2543 uint32_t tmp = 0;
2544 enum pipe pipe;
2545
2546 master_ctl = I915_READ(GEN8_MASTER_IRQ);
2547 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2548 if (!master_ctl)
2549 return IRQ_NONE;
2550
2551 I915_WRITE(GEN8_MASTER_IRQ, 0);
2552 POSTING_READ(GEN8_MASTER_IRQ);
2553
2554 /* Find, clear, then process each source of interrupt */
2555
2556 ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);
2557
2558 if (master_ctl & GEN8_DE_MISC_IRQ) {
2559 tmp = I915_READ(GEN8_DE_MISC_IIR);
2560 if (tmp) {
2561 I915_WRITE(GEN8_DE_MISC_IIR, tmp);
2562 ret = IRQ_HANDLED;
2563 if (tmp & GEN8_DE_MISC_GSE)
2564 intel_opregion_asle_intr(dev);
2565 else
2566 DRM_ERROR("Unexpected DE Misc interrupt\n");
2567 }
2568 else
2569 DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2570 }
2571
2572 if (master_ctl & GEN8_DE_PORT_IRQ) {
2573 tmp = I915_READ(GEN8_DE_PORT_IIR);
2574 if (tmp) {
2575 I915_WRITE(GEN8_DE_PORT_IIR, tmp);
2576 ret = IRQ_HANDLED;
2577 if (tmp & GEN8_AUX_CHANNEL_A)
2578 dp_aux_irq_handler(dev);
2579 else
2580 DRM_ERROR("Unexpected DE Port interrupt\n");
2581 }
2582 else
2583 DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2584 }
2585
2586 for_each_pipe(dev_priv, pipe) {
2587 uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
2588
2589 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2590 continue;
2591
2592 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2593 if (pipe_iir) {
2594 ret = IRQ_HANDLED;
2595 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
2596
2597 if (pipe_iir & GEN8_PIPE_VBLANK &&
2598 intel_pipe_handle_vblank(dev, pipe))
2599 intel_check_page_flip(dev, pipe);
2600
2601 if (IS_GEN9(dev))
2602 flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
2603 else
2604 flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
2605
2606 if (flip_done) {
2607 intel_prepare_page_flip(dev, pipe);
2608 intel_finish_page_flip_plane(dev, pipe);
2609 }
2610
2611 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
2612 hsw_pipe_crc_irq_handler(dev, pipe);
2613
2614 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
2615 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
2616 false))
2617 DRM_ERROR("Pipe %c FIFO underrun\n",
2618 pipe_name(pipe));
2619 }
2620
2622 if (IS_GEN9(dev))
2623 fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2624 else
2625 fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2626
2627 if (fault_errors)
 2628 				DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
 2629 					  pipe_name(pipe),
 2630 					  fault_errors);
2631 } else
2632 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2633 }
2634
2635 if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
2636 /*
2637 * FIXME(BDW): Assume for now that the new interrupt handling
2638 * scheme also closed the SDE interrupt handling race we've seen
2639 * on older pch-split platforms. But this needs testing.
2640 */
2641 u32 pch_iir = I915_READ(SDEIIR);
2642 if (pch_iir) {
2643 I915_WRITE(SDEIIR, pch_iir);
2644 ret = IRQ_HANDLED;
2645 cpt_irq_handler(dev, pch_iir);
2646 } else
2647 DRM_ERROR("The master control interrupt lied (SDE)!\n");
2648
2649 }
2650
2651 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2652 POSTING_READ(GEN8_MASTER_IRQ);
2653
2654 return ret;
2655 }
2656
2657 static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2658 bool reset_completed)
2659 {
2660 struct intel_engine_cs *ring;
2661 int i;
2662
2663 /*
2664 * Notify all waiters for GPU completion events that reset state has
2665 * been changed, and that they need to restart their wait after
2666 * checking for potential errors (and bail out to drop locks if there is
2667 * a gpu reset pending so that i915_error_work_func can acquire them).
2668 */
2669
2670 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2671 for_each_ring(ring, dev_priv, i)
2672 wake_up_all(&ring->irq_queue);
2673
2674 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2675 wake_up_all(&dev_priv->pending_flip_queue);
2676
2677 /*
2678 * Signal tasks blocked in i915_gem_wait_for_error that the pending
2679 * reset state is cleared.
2680 */
2681 if (reset_completed)
2682 wake_up_all(&dev_priv->gpu_error.reset_queue);
2683 }
2684
2685 /**
2686 * i915_error_work_func - do process context error handling work
2687 * @work: work struct
2688 *
2689 * Fire an error uevent so userspace can see that a hang or error
2690 * was detected.
2691 */
2692 static void i915_error_work_func(struct work_struct *work)
2693 {
2694 struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
2695 work);
2696 struct drm_i915_private *dev_priv =
2697 container_of(error, struct drm_i915_private, gpu_error);
2698 struct drm_device *dev = dev_priv->dev;
2699 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2700 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2701 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
2702 int ret;
2703
2704 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
2705
2706 /*
2707 * Note that there's only one work item which does gpu resets, so we
2708 * need not worry about concurrent gpu resets potentially incrementing
2709 * error->reset_counter twice. We only need to take care of another
2710 * racing irq/hangcheck declaring the gpu dead for a second time. A
2711 * quick check for that is good enough: schedule_work ensures the
2712 * correct ordering between hang detection and this work item, and since
2713 * the reset in-progress bit is only ever set by code outside of this
2714 * work we don't need to worry about any other races.
2715 */
2716 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
2717 DRM_DEBUG_DRIVER("resetting chip\n");
2718 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
2719 reset_event);
2720
2721 /*
2722 * In most cases it's guaranteed that we get here with an RPM
2723 * reference held, for example because there is a pending GPU
2724 * request that won't finish until the reset is done. This
2725 * isn't the case at least when we get here by doing a
 2726 		 * simulated reset via debugfs, so get an RPM reference.
2727 */
2728 intel_runtime_pm_get(dev_priv);
2729 /*
2730 * All state reset _must_ be completed before we update the
2731 * reset counter, for otherwise waiters might miss the reset
2732 * pending state and not properly drop locks, resulting in
2733 * deadlocks with the reset work.
2734 */
2735 ret = i915_reset(dev);
2736
2737 intel_display_handle_reset(dev);
2738
2739 intel_runtime_pm_put(dev_priv);
2740
2741 if (ret == 0) {
2742 /*
2743 * After all the gem state is reset, increment the reset
2744 * counter and wake up everyone waiting for the reset to
2745 * complete.
2746 *
2747 * Since unlock operations are a one-sided barrier only,
 2748 			 * we need to insert a barrier here to order any seqno
 2749 			 * updates before the counter increment.
2751 */
2752 smp_mb__before_atomic();
2753 atomic_inc(&dev_priv->gpu_error.reset_counter);
2754
2755 kobject_uevent_env(&dev->primary->kdev->kobj,
2756 KOBJ_CHANGE, reset_done_event);
2757 } else {
2758 atomic_set_mask(I915_WEDGED, &error->reset_counter);
2759 }
2760
2761 /*
2762 * Note: The wake_up also serves as a memory barrier so that
 2763 		 * waiters see the updated value of the reset counter atomic_t.
2764 */
2765 i915_error_wake_up(dev_priv, true);
2766 }
2767 }
2768
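/*
 * Dump the error identity register (EIR) and related error state to the
 * log, write the bits back to clear them, and mask any error sources that
 * remain stuck afterwards.
 */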
2769 static void i915_report_and_clear_eir(struct drm_device *dev)
2770 {
2771 struct drm_i915_private *dev_priv = dev->dev_private;
2772 uint32_t instdone[I915_NUM_INSTDONE_REG];
2773 u32 eir = I915_READ(EIR);
2774 int pipe, i;
2775
2776 if (!eir)
2777 return;
2778
2779 pr_err("render error detected, EIR: 0x%08x\n", eir);
2780
2781 i915_get_extra_instdone(dev, instdone);
2782
2783 if (IS_G4X(dev)) {
2784 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2785 u32 ipeir = I915_READ(IPEIR_I965);
2786
2787 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2788 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2789 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2790 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2791 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
2792 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2793 I915_WRITE(IPEIR_I965, ipeir);
2794 POSTING_READ(IPEIR_I965);
2795 }
2796 if (eir & GM45_ERROR_PAGE_TABLE) {
2797 u32 pgtbl_err = I915_READ(PGTBL_ER);
2798 pr_err("page table error\n");
2799 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
2800 I915_WRITE(PGTBL_ER, pgtbl_err);
2801 POSTING_READ(PGTBL_ER);
2802 }
2803 }
2804
2805 if (!IS_GEN2(dev)) {
2806 if (eir & I915_ERROR_PAGE_TABLE) {
2807 u32 pgtbl_err = I915_READ(PGTBL_ER);
2808 pr_err("page table error\n");
2809 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
2810 I915_WRITE(PGTBL_ER, pgtbl_err);
2811 POSTING_READ(PGTBL_ER);
2812 }
2813 }
2814
2815 if (eir & I915_ERROR_MEMORY_REFRESH) {
2816 pr_err("memory refresh error:\n");
2817 for_each_pipe(dev_priv, pipe)
2818 pr_err("pipe %c stat: 0x%08x\n",
2819 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
2820 /* pipestat has already been acked */
2821 }
2822 if (eir & I915_ERROR_INSTRUCTION) {
2823 pr_err("instruction error\n");
2824 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
2825 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2826 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2827 if (INTEL_INFO(dev)->gen < 4) {
2828 u32 ipeir = I915_READ(IPEIR);
2829
2830 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
2831 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
2832 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
2833 I915_WRITE(IPEIR, ipeir);
2834 POSTING_READ(IPEIR);
2835 } else {
2836 u32 ipeir = I915_READ(IPEIR_I965);
2837
2838 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2839 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2840 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
2841 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2842 I915_WRITE(IPEIR_I965, ipeir);
2843 POSTING_READ(IPEIR_I965);
2844 }
2845 }
2846
2847 I915_WRITE(EIR, eir);
2848 POSTING_READ(EIR);
2849 eir = I915_READ(EIR);
2850 if (eir) {
2851 /*
2852 * some errors might have become stuck,
2853 * mask them.
2854 */
2855 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2856 I915_WRITE(EMR, I915_READ(EMR) | eir);
2857 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2858 }
2859 }
2860
2861 /**
2862 * i915_handle_error - handle an error interrupt
2863 * @dev: drm device
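 * @wedged: if true, mark a GPU reset as pending and wake up all waiters
 * @fmt: printf-style format string describing the error, plus its arguments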
2864 *
 2865 * Do some basic checking of register state at error interrupt time and
2866 * dump it to the syslog. Also call i915_capture_error_state() to make
2867 * sure we get a record and make it available in debugfs. Fire a uevent
2868 * so userspace knows something bad happened (should trigger collection
2869 * of a ring dump etc.).
2870 */
2871 void i915_handle_error(struct drm_device *dev, bool wedged,
2872 const char *fmt, ...)
2873 {
2874 struct drm_i915_private *dev_priv = dev->dev_private;
2875 va_list args;
2876 char error_msg[80];
2877
2878 va_start(args, fmt);
2879 vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2880 va_end(args);
2881
2882 i915_capture_error_state(dev, wedged, error_msg);
2883 i915_report_and_clear_eir(dev);
2884
2885 if (wedged) {
2886 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
2887 &dev_priv->gpu_error.reset_counter);
2888
2889 /*
2890 * Wakeup waiting processes so that the reset work function
2891 * i915_error_work_func doesn't deadlock trying to grab various
2892 * locks. By bumping the reset counter first, the woken
2893 * processes will see a reset in progress and back off,
2894 * releasing their locks and then wait for the reset completion.
2895 * We must do this for _all_ gpu waiters that might hold locks
2896 * that the reset work needs to acquire.
2897 *
2898 * Note: The wake_up serves as the required memory barrier to
2899 * ensure that the waiters see the updated value of the reset
2900 * counter atomic_t.
2901 */
2902 i915_error_wake_up(dev_priv, false);
2903 }
2904
2905 /*
2906 * Our reset work can grab modeset locks (since it needs to reset the
 2907 	 * state of outstanding pageflips). Hence it must not be run on our own
 2908 	 * dev_priv->wq work queue for otherwise the flush_work in the pageflip
2909 * code will deadlock.
2910 */
2911 schedule_work(&dev_priv->gpu_error.work);
2912 }
2913
2914 /* Called from drm generic code, passed 'crtc' which
2915 * we use as a pipe index
2916 */
2917 static int i915_enable_vblank(struct drm_device *dev, int pipe)
2918 {
2919 struct drm_i915_private *dev_priv = dev->dev_private;
2920 unsigned long irqflags;
2921
2922 if (!i915_pipe_enabled(dev, pipe))
2923 return -EINVAL;
2924
2925 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2926 if (INTEL_INFO(dev)->gen >= 4)
2927 i915_enable_pipestat(dev_priv, pipe,
2928 PIPE_START_VBLANK_INTERRUPT_STATUS);
2929 else
2930 i915_enable_pipestat(dev_priv, pipe,
2931 PIPE_VBLANK_INTERRUPT_STATUS);
2932 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2933
2934 return 0;
2935 }
2936
2937 static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
2938 {
2939 struct drm_i915_private *dev_priv = dev->dev_private;
2940 unsigned long irqflags;
2941 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2942 DE_PIPE_VBLANK(pipe);
2943
2944 if (!i915_pipe_enabled(dev, pipe))
2945 return -EINVAL;
2946
2947 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2948 ironlake_enable_display_irq(dev_priv, bit);
2949 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2950
2951 return 0;
2952 }
2953
2954 static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
2955 {
2956 struct drm_i915_private *dev_priv = dev->dev_private;
2957 unsigned long irqflags;
2958
2959 if (!i915_pipe_enabled(dev, pipe))
2960 return -EINVAL;
2961
2962 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2963 i915_enable_pipestat(dev_priv, pipe,
2964 PIPE_START_VBLANK_INTERRUPT_STATUS);
2965 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2966
2967 return 0;
2968 }
2969
2970 static int gen8_enable_vblank(struct drm_device *dev, int pipe)
2971 {
2972 struct drm_i915_private *dev_priv = dev->dev_private;
2973 unsigned long irqflags;
2974
2975 if (!i915_pipe_enabled(dev, pipe))
2976 return -EINVAL;
2977
2978 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2979 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
2980 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2981 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2982 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2983 return 0;
2984 }
2985
2986 /* Called from drm generic code, passed 'crtc' which
2987 * we use as a pipe index
2988 */
2989 static void i915_disable_vblank(struct drm_device *dev, int pipe)
2990 {
2991 struct drm_i915_private *dev_priv = dev->dev_private;
2992 unsigned long irqflags;
2993
2994 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2995 i915_disable_pipestat(dev_priv, pipe,
2996 PIPE_VBLANK_INTERRUPT_STATUS |
2997 PIPE_START_VBLANK_INTERRUPT_STATUS);
2998 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2999 }
3000
3001 static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
3002 {
3003 struct drm_i915_private *dev_priv = dev->dev_private;
3004 unsigned long irqflags;
3005 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
3006 DE_PIPE_VBLANK(pipe);
3007
3008 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3009 ironlake_disable_display_irq(dev_priv, bit);
3010 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3011 }
3012
3013 static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
3014 {
3015 struct drm_i915_private *dev_priv = dev->dev_private;
3016 unsigned long irqflags;
3017
3018 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3019 i915_disable_pipestat(dev_priv, pipe,
3020 PIPE_START_VBLANK_INTERRUPT_STATUS);
3021 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3022 }
3023
3024 static void gen8_disable_vblank(struct drm_device *dev, int pipe)
3025 {
3026 struct drm_i915_private *dev_priv = dev->dev_private;
3027 unsigned long irqflags;
3028
3029 if (!i915_pipe_enabled(dev, pipe))
3030 return;
3031
3032 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3033 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
3034 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
3035 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
3036 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3037 }
3038
3039 static u32
3040 ring_last_seqno(struct intel_engine_cs *ring)
3041 {
3042 return list_entry(ring->request_list.prev,
3043 struct drm_i915_gem_request, list)->seqno;
3044 }
3045
3046 static bool
3047 ring_idle(struct intel_engine_cs *ring, u32 seqno)
3048 {
3049 return (list_empty(&ring->request_list) ||
3050 i915_seqno_passed(seqno, ring_last_seqno(ring)));
3051 }
3052
3053 static bool
3054 ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
3055 {
3056 if (INTEL_INFO(dev)->gen >= 8) {
3057 return (ipehr >> 23) == 0x1c;
3058 } else {
3059 ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
3060 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
3061 MI_SEMAPHORE_REGISTER);
3062 }
3063 }
3064
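/*
 * Given the IPEHR of a semaphore wait (and, on gen8+, the semaphore
 * address), find the ring that is expected to signal it.
 */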
3065 static struct intel_engine_cs *
3066 semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
3067 {
3068 struct drm_i915_private *dev_priv = ring->dev->dev_private;
3069 struct intel_engine_cs *signaller;
3070 int i;
3071
3072 if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
3073 for_each_ring(signaller, dev_priv, i) {
3074 if (ring == signaller)
3075 continue;
3076
3077 if (offset == signaller->semaphore.signal_ggtt[ring->id])
3078 return signaller;
3079 }
3080 } else {
3081 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
3082
3083 for_each_ring(signaller, dev_priv, i) {
 3084 			if (ring == signaller)
3085 continue;
3086
3087 if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
3088 return signaller;
3089 }
3090 }
3091
3092 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
3093 ring->id, ipehr, offset);
3094
3095 return NULL;
3096 }
3097
3098 static struct intel_engine_cs *
3099 semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
3100 {
3101 struct drm_i915_private *dev_priv = ring->dev->dev_private;
3102 u32 cmd, ipehr, head;
3103 u64 offset = 0;
3104 int i, backwards;
3105
3106 ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
3107 if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
3108 return NULL;
3109
3110 /*
3111 * HEAD is likely pointing to the dword after the actual command,
3112 * so scan backwards until we find the MBOX. But limit it to just 3
3113 * or 4 dwords depending on the semaphore wait command size.
3114 * Note that we don't care about ACTHD here since that might
 3115 	 * point at the batch, and semaphores are always emitted into the
3116 * ringbuffer itself.
3117 */
3118 head = I915_READ_HEAD(ring) & HEAD_ADDR;
3119 backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
3120
3121 for (i = backwards; i; --i) {
3122 /*
3123 * Be paranoid and presume the hw has gone off into the wild -
3124 * our ring is smaller than what the hardware (and hence
3125 * HEAD_ADDR) allows. Also handles wrap-around.
3126 */
3127 head &= ring->buffer->size - 1;
3128
3129 /* This here seems to blow up */
3130 cmd = ioread32(ring->buffer->virtual_start + head);
3131 if (cmd == ipehr)
3132 break;
3133
3134 head -= 4;
3135 }
3136
3137 if (!i)
3138 return NULL;
3139
3140 *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
3141 if (INTEL_INFO(ring->dev)->gen >= 8) {
3142 offset = ioread32(ring->buffer->virtual_start + head + 12);
3143 offset <<= 32;
 3144 		offset |= ioread32(ring->buffer->virtual_start + head + 8);
3145 }
3146 return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
3147 }
3148
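/*
 * Check whether the semaphore this ring is waiting on has already been
 * signalled: returns 1 if the wait should have completed, 0 if the
 * signaller is still busy, and -1 if we cannot tell or suspect a
 * semaphore deadlock.
 */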
3149 static int semaphore_passed(struct intel_engine_cs *ring)
3150 {
3151 struct drm_i915_private *dev_priv = ring->dev->dev_private;
3152 struct intel_engine_cs *signaller;
3153 u32 seqno;
3154
3155 ring->hangcheck.deadlock++;
3156
3157 signaller = semaphore_waits_for(ring, &seqno);
3158 if (signaller == NULL)
3159 return -1;
3160
3161 /* Prevent pathological recursion due to driver bugs */
3162 if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
3163 return -1;
3164
3165 if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
3166 return 1;
3167
3168 /* cursory check for an unkickable deadlock */
3169 if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
3170 semaphore_passed(signaller) < 0)
3171 return -1;
3172
3173 return 0;
3174 }
3175
3176 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
3177 {
3178 struct intel_engine_cs *ring;
3179 int i;
3180
3181 for_each_ring(ring, dev_priv, i)
3182 ring->hangcheck.deadlock = 0;
3183 }
3184
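/*
 * Classify a ring whose seqno has not advanced: still making progress
 * (ACTHD moving), stuck on a WAIT_FOR_EVENT or semaphore that we can try
 * to kick, or genuinely hung.
 */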
3185 static enum intel_ring_hangcheck_action
3186 ring_stuck(struct intel_engine_cs *ring, u64 acthd)
3187 {
3188 struct drm_device *dev = ring->dev;
3189 struct drm_i915_private *dev_priv = dev->dev_private;
3190 u32 tmp;
3191
3192 if (acthd != ring->hangcheck.acthd) {
3193 if (acthd > ring->hangcheck.max_acthd) {
3194 ring->hangcheck.max_acthd = acthd;
3195 return HANGCHECK_ACTIVE;
3196 }
3197
3198 return HANGCHECK_ACTIVE_LOOP;
3199 }
3200
3201 if (IS_GEN2(dev))
3202 return HANGCHECK_HUNG;
3203
3204 /* Is the chip hanging on a WAIT_FOR_EVENT?
3205 * If so we can simply poke the RB_WAIT bit
3206 * and break the hang. This should work on
3207 * all but the second generation chipsets.
3208 */
3209 tmp = I915_READ_CTL(ring);
3210 if (tmp & RING_WAIT) {
3211 i915_handle_error(dev, false,
3212 "Kicking stuck wait on %s",
3213 ring->name);
3214 I915_WRITE_CTL(ring, tmp);
3215 return HANGCHECK_KICK;
3216 }
3217
3218 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
3219 switch (semaphore_passed(ring)) {
3220 default:
3221 return HANGCHECK_HUNG;
3222 case 1:
3223 i915_handle_error(dev, false,
3224 "Kicking stuck semaphore on %s",
3225 ring->name);
3226 I915_WRITE_CTL(ring, tmp);
3227 return HANGCHECK_KICK;
3228 case 0:
3229 return HANGCHECK_WAIT;
3230 }
3231 }
3232
3233 return HANGCHECK_HUNG;
3234 }
3235
3236 /**
3237 * This is called when the chip hasn't reported back with completed
 3238 * batchbuffers in a long time. We keep track of per-ring seqno progress and
 3239 * if there is no progress, the hangcheck score for that ring is increased.
 3240 * Further, acthd is inspected to see if the ring is stuck. In the stuck case
 3241 * we kick the ring. If we see no progress on three subsequent calls
 3242 * we assume the chip is wedged and try to fix it by resetting the chip.
3243 */
3244 static void i915_hangcheck_elapsed(unsigned long data)
3245 {
3246 struct drm_device *dev = (struct drm_device *)data;
3247 struct drm_i915_private *dev_priv = dev->dev_private;
3248 struct intel_engine_cs *ring;
3249 int i;
3250 int busy_count = 0, rings_hung = 0;
3251 bool stuck[I915_NUM_RINGS] = { 0 };
3252 #define BUSY 1
3253 #define KICK 5
3254 #define HUNG 20
3255
3256 if (!i915.enable_hangcheck)
3257 return;
3258
3259 for_each_ring(ring, dev_priv, i) {
3260 u64 acthd;
3261 u32 seqno;
3262 bool busy = true;
3263
3264 semaphore_clear_deadlocks(dev_priv);
3265
3266 seqno = ring->get_seqno(ring, false);
3267 acthd = intel_ring_get_active_head(ring);
3268
3269 if (ring->hangcheck.seqno == seqno) {
3270 if (ring_idle(ring, seqno)) {
3271 ring->hangcheck.action = HANGCHECK_IDLE;
3272
3273 if (waitqueue_active(&ring->irq_queue)) {
3274 /* Issue a wake-up to catch stuck h/w. */
3275 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
3276 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
3277 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
3278 ring->name);
3279 else
3280 DRM_INFO("Fake missed irq on %s\n",
3281 ring->name);
3282 wake_up_all(&ring->irq_queue);
3283 }
3284 /* Safeguard against driver failure */
3285 ring->hangcheck.score += BUSY;
3286 } else
3287 busy = false;
3288 } else {
3289 /* We always increment the hangcheck score
3290 * if the ring is busy and still processing
3291 * the same request, so that no single request
3292 * can run indefinitely (such as a chain of
3293 * batches). The only time we do not increment
 3294 				 * the hangcheck score on this ring is when this
3295 * ring is in a legitimate wait for another
3296 * ring. In that case the waiting ring is a
3297 * victim and we want to be sure we catch the
3298 * right culprit. Then every time we do kick
3299 * the ring, add a small increment to the
3300 * score so that we can catch a batch that is
3301 * being repeatedly kicked and so responsible
3302 * for stalling the machine.
3303 */
3304 ring->hangcheck.action = ring_stuck(ring,
3305 acthd);
3306
3307 switch (ring->hangcheck.action) {
3308 case HANGCHECK_IDLE:
3309 case HANGCHECK_WAIT:
3310 case HANGCHECK_ACTIVE:
3311 break;
3312 case HANGCHECK_ACTIVE_LOOP:
3313 ring->hangcheck.score += BUSY;
3314 break;
3315 case HANGCHECK_KICK:
3316 ring->hangcheck.score += KICK;
3317 break;
3318 case HANGCHECK_HUNG:
3319 ring->hangcheck.score += HUNG;
3320 stuck[i] = true;
3321 break;
3322 }
3323 }
3324 } else {
3325 ring->hangcheck.action = HANGCHECK_ACTIVE;
3326
3327 /* Gradually reduce the count so that we catch DoS
3328 * attempts across multiple batches.
3329 */
3330 if (ring->hangcheck.score > 0)
3331 ring->hangcheck.score--;
3332
3333 ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
3334 }
3335
3336 ring->hangcheck.seqno = seqno;
3337 ring->hangcheck.acthd = acthd;
3338 busy_count += busy;
3339 }
3340
3341 for_each_ring(ring, dev_priv, i) {
3342 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
3343 DRM_INFO("%s on %s\n",
3344 stuck[i] ? "stuck" : "no progress",
3345 ring->name);
3346 rings_hung++;
3347 }
3348 }
3349
3350 if (rings_hung)
3351 return i915_handle_error(dev, true, "Ring hung");
3352
3353 if (busy_count)
 3354 		/* Reset timer in case the chip hangs without another request
 3355 		 * being added */
3356 i915_queue_hangcheck(dev);
3357 }
3358
3359 void i915_queue_hangcheck(struct drm_device *dev)
3360 {
3361 struct drm_i915_private *dev_priv = dev->dev_private;
3362 if (!i915.enable_hangcheck)
3363 return;
3364
3365 mod_timer(&dev_priv->gpu_error.hangcheck_timer,
3366 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
3367 }
3368
3369 static void ibx_irq_reset(struct drm_device *dev)
3370 {
3371 struct drm_i915_private *dev_priv = dev->dev_private;
3372
3373 if (HAS_PCH_NOP(dev))
3374 return;
3375
3376 GEN5_IRQ_RESET(SDE);
3377
3378 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
3379 I915_WRITE(SERR_INT, 0xffffffff);
3380 }
3381
3382 /*
3383 * SDEIER is also touched by the interrupt handler to work around missed PCH
3384 * interrupts. Hence we can't update it after the interrupt handler is enabled -
3385 * instead we unconditionally enable all PCH interrupt sources here, but then
3386 * only unmask them as needed with SDEIMR.
3387 *
3388 * This function needs to be called before interrupts are enabled.
3389 */
3390 static void ibx_irq_pre_postinstall(struct drm_device *dev)
3391 {
3392 struct drm_i915_private *dev_priv = dev->dev_private;
3393
3394 if (HAS_PCH_NOP(dev))
3395 return;
3396
3397 WARN_ON(I915_READ(SDEIER) != 0);
3398 I915_WRITE(SDEIER, 0xffffffff);
3399 POSTING_READ(SDEIER);
3400 }
3401
3402 static void gen5_gt_irq_reset(struct drm_device *dev)
3403 {
3404 struct drm_i915_private *dev_priv = dev->dev_private;
3405
3406 GEN5_IRQ_RESET(GT);
3407 if (INTEL_INFO(dev)->gen >= 6)
3408 GEN5_IRQ_RESET(GEN6_PM);
3409 }
3410
3411 /* drm_dma.h hooks
3412 */
3413 static void ironlake_irq_reset(struct drm_device *dev)
3414 {
3415 struct drm_i915_private *dev_priv = dev->dev_private;
3416
3417 I915_WRITE(HWSTAM, 0xffffffff);
3418
3419 GEN5_IRQ_RESET(DE);
3420 if (IS_GEN7(dev))
3421 I915_WRITE(GEN7_ERR_INT, 0xffffffff);
3422
3423 gen5_gt_irq_reset(dev);
3424
3425 ibx_irq_reset(dev);
3426 }
3427
3428 static void valleyview_irq_preinstall(struct drm_device *dev)
3429 {
3430 struct drm_i915_private *dev_priv = dev->dev_private;
3431 int pipe;
3432
3433 /* VLV magic */
3434 I915_WRITE(VLV_IMR, 0);
3435 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
3436 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
3437 I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
3438
3439 /* and GT */
3440 I915_WRITE(GTIIR, I915_READ(GTIIR));
3441 I915_WRITE(GTIIR, I915_READ(GTIIR));
3442
3443 gen5_gt_irq_reset(dev);
3444
3445 I915_WRITE(DPINVGTT, 0xff);
3446
3447 I915_WRITE(PORT_HOTPLUG_EN, 0);
3448 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3449 for_each_pipe(dev_priv, pipe)
3450 I915_WRITE(PIPESTAT(pipe), 0xffff);
3451 I915_WRITE(VLV_IIR, 0xffffffff);
3452 I915_WRITE(VLV_IMR, 0xffffffff);
3453 I915_WRITE(VLV_IER, 0x0);
3454 POSTING_READ(VLV_IER);
3455 }
3456
3457 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3458 {
3459 GEN8_IRQ_RESET_NDX(GT, 0);
3460 GEN8_IRQ_RESET_NDX(GT, 1);
3461 GEN8_IRQ_RESET_NDX(GT, 2);
3462 GEN8_IRQ_RESET_NDX(GT, 3);
3463 }
3464
3465 static void gen8_irq_reset(struct drm_device *dev)
3466 {
3467 struct drm_i915_private *dev_priv = dev->dev_private;
3468 int pipe;
3469
3470 I915_WRITE(GEN8_MASTER_IRQ, 0);
3471 POSTING_READ(GEN8_MASTER_IRQ);
3472
3473 gen8_gt_irq_reset(dev_priv);
3474
3475 for_each_pipe(dev_priv, pipe)
3476 if (intel_display_power_is_enabled(dev_priv,
3477 POWER_DOMAIN_PIPE(pipe)))
3478 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3479
3480 GEN5_IRQ_RESET(GEN8_DE_PORT_);
3481 GEN5_IRQ_RESET(GEN8_DE_MISC_);
3482 GEN5_IRQ_RESET(GEN8_PCU_);
3483
3484 ibx_irq_reset(dev);
3485 }
3486
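/*
 * Re-program the pipe B and pipe C interrupt registers from the saved
 * de_irq_mask after the display power well feeding them has been enabled
 * again.
 */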
3487 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
3488 {
3489 spin_lock_irq(&dev_priv->irq_lock);
3490 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
3491 ~dev_priv->de_irq_mask[PIPE_B]);
3492 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
3493 ~dev_priv->de_irq_mask[PIPE_C]);
3494 spin_unlock_irq(&dev_priv->irq_lock);
3495 }
3496
3497 static void cherryview_irq_preinstall(struct drm_device *dev)
3498 {
3499 struct drm_i915_private *dev_priv = dev->dev_private;
3500 int pipe;
3501
3502 I915_WRITE(GEN8_MASTER_IRQ, 0);
3503 POSTING_READ(GEN8_MASTER_IRQ);
3504
3505 gen8_gt_irq_reset(dev_priv);
3506
3507 GEN5_IRQ_RESET(GEN8_PCU_);
3508
3509 POSTING_READ(GEN8_PCU_IIR);
3510
3511 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3512
3513 I915_WRITE(PORT_HOTPLUG_EN, 0);
3514 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3515
3516 for_each_pipe(dev_priv, pipe)
3517 I915_WRITE(PIPESTAT(pipe), 0xffff);
3518
3519 I915_WRITE(VLV_IMR, 0xffffffff);
3520 I915_WRITE(VLV_IER, 0x0);
3521 I915_WRITE(VLV_IIR, 0xffffffff);
3522 POSTING_READ(VLV_IIR);
3523 }
3524
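/*
 * Unmask the PCH hotplug interrupts for every encoder whose HPD pin is
 * currently enabled, and program the digital port hotplug pulse durations.
 */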
3525 static void ibx_hpd_irq_setup(struct drm_device *dev)
3526 {
3527 struct drm_i915_private *dev_priv = dev->dev_private;
3528 struct intel_encoder *intel_encoder;
3529 u32 hotplug_irqs, hotplug, enabled_irqs = 0;
3530
3531 if (HAS_PCH_IBX(dev)) {
3532 hotplug_irqs = SDE_HOTPLUG_MASK;
3533 for_each_intel_encoder(dev, intel_encoder)
3534 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3535 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
3536 } else {
3537 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3538 for_each_intel_encoder(dev, intel_encoder)
3539 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3540 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
3541 }
3542
3543 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3544
3545 /*
3546 * Enable digital hotplug on the PCH, and configure the DP short pulse
 3547 	 * duration to 2ms (which is the minimum in the DisplayPort spec)
3548 *
3549 * This register is the same on all known PCH chips.
3550 */
3551 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3552 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3553 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3554 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3555 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3556 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3557 }
3558
3559 static void ibx_irq_postinstall(struct drm_device *dev)
3560 {
3561 struct drm_i915_private *dev_priv = dev->dev_private;
3562 u32 mask;
3563
3564 if (HAS_PCH_NOP(dev))
3565 return;
3566
3567 if (HAS_PCH_IBX(dev))
3568 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3569 else
3570 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3571
3572 GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
3573 I915_WRITE(SDEIMR, ~mask);
3574 }
3575
3576 static void gen5_gt_irq_postinstall(struct drm_device *dev)
3577 {
3578 struct drm_i915_private *dev_priv = dev->dev_private;
3579 u32 pm_irqs, gt_irqs;
3580
3581 pm_irqs = gt_irqs = 0;
3582
3583 dev_priv->gt_irq_mask = ~0;
3584 if (HAS_L3_DPF(dev)) {
3585 /* L3 parity interrupt is always unmasked. */
3586 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
3587 gt_irqs |= GT_PARITY_ERROR(dev);
3588 }
3589
3590 gt_irqs |= GT_RENDER_USER_INTERRUPT;
3591 if (IS_GEN5(dev)) {
3592 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
3593 ILK_BSD_USER_INTERRUPT;
3594 } else {
3595 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3596 }
3597
3598 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
3599
3600 if (INTEL_INFO(dev)->gen >= 6) {
3601 pm_irqs |= dev_priv->pm_rps_events;
3602
3603 if (HAS_VEBOX(dev))
3604 pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3605
3606 dev_priv->pm_irq_mask = 0xffffffff;
3607 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
3608 }
3609 }
3610
3611 static int ironlake_irq_postinstall(struct drm_device *dev)
3612 {
3613 struct drm_i915_private *dev_priv = dev->dev_private;
3614 u32 display_mask, extra_mask;
3615
3616 if (INTEL_INFO(dev)->gen >= 7) {
3617 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3618 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3619 DE_PLANEB_FLIP_DONE_IVB |
3620 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
3621 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3622 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
3623 } else {
3624 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3625 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
3626 DE_AUX_CHANNEL_A |
3627 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3628 DE_POISON);
3629 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3630 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
3631 }
3632
3633 dev_priv->irq_mask = ~display_mask;
3634
3635 I915_WRITE(HWSTAM, 0xeffe);
3636
3637 ibx_irq_pre_postinstall(dev);
3638
3639 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3640
3641 gen5_gt_irq_postinstall(dev);
3642
3643 ibx_irq_postinstall(dev);
3644
3645 if (IS_IRONLAKE_M(dev)) {
3646 /* Enable PCU event interrupts
3647 *
3648 * spinlocking not required here for correctness since interrupt
3649 * setup is guaranteed to run in single-threaded context. But we
3650 * need it to make the assert_spin_locked check happy. */
3651 spin_lock_irq(&dev_priv->irq_lock);
3652 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
3653 spin_unlock_irq(&dev_priv->irq_lock);
3654 }
3655
3656 return 0;
3657 }
3658
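/*
 * Turn on the VLV display interrupts: clear stale PIPESTAT bits, enable
 * the flip-done and CRC pipestat events (plus GMBUS on pipe A), then
 * unmask the pipe and port event bits in VLV_IMR/VLV_IER.
 */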
3659 static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
3660 {
3661 u32 pipestat_mask;
3662 u32 iir_mask;
3663
3664 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3665 PIPE_FIFO_UNDERRUN_STATUS;
3666
3667 I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
3668 I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
3669 POSTING_READ(PIPESTAT(PIPE_A));
3670
3671 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3672 PIPE_CRC_DONE_INTERRUPT_STATUS;
3673
3674 i915_enable_pipestat(dev_priv, PIPE_A, pipestat_mask |
3675 PIPE_GMBUS_INTERRUPT_STATUS);
3676 i915_enable_pipestat(dev_priv, PIPE_B, pipestat_mask);
3677
3678 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3679 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3680 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3681 dev_priv->irq_mask &= ~iir_mask;
3682
3683 I915_WRITE(VLV_IIR, iir_mask);
3684 I915_WRITE(VLV_IIR, iir_mask);
3685 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3686 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3687 POSTING_READ(VLV_IER);
3688 }
3689
3690 static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
3691 {
3692 u32 pipestat_mask;
3693 u32 iir_mask;
3694
3695 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3696 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3697 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3698
3699 dev_priv->irq_mask |= iir_mask;
3700 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3701 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3702 I915_WRITE(VLV_IIR, iir_mask);
3703 I915_WRITE(VLV_IIR, iir_mask);
3704 POSTING_READ(VLV_IIR);
3705
3706 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3707 PIPE_CRC_DONE_INTERRUPT_STATUS;
3708
3709 i915_disable_pipestat(dev_priv, PIPE_A, pipestat_mask |
3710 PIPE_GMBUS_INTERRUPT_STATUS);
3711 i915_disable_pipestat(dev_priv, PIPE_B, pipestat_mask);
3712
3713 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3714 PIPE_FIFO_UNDERRUN_STATUS;
3715 I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
3716 I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
3717 POSTING_READ(PIPESTAT(PIPE_A));
3718 }
3719
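/*
 * valleyview_{enable,disable}_display_irqs track whether the display
 * side wants its interrupts and only touch the hardware while the
 * driver's interrupt handler is installed. Callers must hold irq_lock.
 */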
3720 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3721 {
3722 assert_spin_locked(&dev_priv->irq_lock);
3723
3724 if (dev_priv->display_irqs_enabled)
3725 return;
3726
3727 dev_priv->display_irqs_enabled = true;
3728
3729 if (intel_irqs_enabled(dev_priv))
3730 valleyview_display_irqs_install(dev_priv);
3731 }
3732
3733 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3734 {
3735 assert_spin_locked(&dev_priv->irq_lock);
3736
3737 if (!dev_priv->display_irqs_enabled)
3738 return;
3739
3740 dev_priv->display_irqs_enabled = false;
3741
3742 if (intel_irqs_enabled(dev_priv))
3743 valleyview_display_irqs_uninstall(dev_priv);
3744 }
3745
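/*
 * VLV interrupt enable: start fully masked, install the display
 * interrupts if they were requested while IRQs were down, clear stale
 * IIR bits, bring up the GT block and finally set the master enable.
 */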
3746 static int valleyview_irq_postinstall(struct drm_device *dev)
3747 {
3748 struct drm_i915_private *dev_priv = dev->dev_private;
3749
3750 dev_priv->irq_mask = ~0;
3751
3752 I915_WRITE(PORT_HOTPLUG_EN, 0);
3753 POSTING_READ(PORT_HOTPLUG_EN);
3754
3755 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3756 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3757 I915_WRITE(VLV_IIR, 0xffffffff);
3758 POSTING_READ(VLV_IER);
3759
3760 /* Interrupt setup is already guaranteed to be single-threaded, this is
3761 * just to make the assert_spin_locked check happy. */
3762 spin_lock_irq(&dev_priv->irq_lock);
3763 if (dev_priv->display_irqs_enabled)
3764 valleyview_display_irqs_install(dev_priv);
3765 spin_unlock_irq(&dev_priv->irq_lock);
3766
3767 I915_WRITE(VLV_IIR, 0xffffffff);
3768 I915_WRITE(VLV_IIR, 0xffffffff);
3769
3770 gen5_gt_irq_postinstall(dev);
3771
3772 /* ack & enable invalid PTE error interrupts */
3773 #if 0 /* FIXME: add support to irq handler for checking these bits */
3774 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3775 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
3776 #endif
3777
3778 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3779
3780 return 0;
3781 }
3782
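/*
 * Program the four gen8 GT interrupt banks: user and context-switch
 * interrupts for RCS/BCS (plus L3 parity) in bank 0 and VCS1/VCS2 in
 * bank 1, the RPS events in bank 2 (enabled but left masked), and VECS
 * in bank 3.
 */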
3783 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3784 {
3785 /* These are interrupts we'll toggle with the ring mask register */
3786 uint32_t gt_interrupts[] = {
3787 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3788 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3789 GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
3790 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3791 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3792 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3793 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3794 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3795 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3796 0,
3797 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3798 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3799 };
3800
3801 dev_priv->pm_irq_mask = 0xffffffff;
3802 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3803 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
3804 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, dev_priv->pm_rps_events);
3805 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
3806 }
3807
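/*
 * Set up the gen8/gen9 display engine pipe interrupts (flip done, CRC,
 * fault errors) for every pipe whose power domain is currently up, and
 * the AUX channel A interrupt in the DE port registers. Vblank and FIFO
 * underrun are written to IER but stay masked until needed.
 */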
3808 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3809 {
3810 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3811 uint32_t de_pipe_enables;
3812 int pipe;
3813
3814 if (IS_GEN9(dev_priv))
3815 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
3816 GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
3817 else
3818 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
3819 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3820
3821 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3822 GEN8_PIPE_FIFO_UNDERRUN;
3823
3824 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3825 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3826 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3827
3828 for_each_pipe(dev_priv, pipe)
3829 if (intel_display_power_is_enabled(dev_priv,
3830 POWER_DOMAIN_PIPE(pipe)))
3831 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3832 dev_priv->de_irq_mask[pipe],
3833 de_pipe_enables);
3834
3835 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A);
3836 }
3837
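/*
 * BDW+ interrupt enable: GT and DE banks first, then the PCH, and only
 * then the top-level master interrupt control bit.
 */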
3838 static int gen8_irq_postinstall(struct drm_device *dev)
3839 {
3840 struct drm_i915_private *dev_priv = dev->dev_private;
3841
3842 ibx_irq_pre_postinstall(dev);
3843
3844 gen8_gt_irq_postinstall(dev_priv);
3845 gen8_de_irq_postinstall(dev_priv);
3846
3847 ibx_irq_postinstall(dev);
3848
3849 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
3850 POSTING_READ(GEN8_MASTER_IRQ);
3851
3852 return 0;
3853 }
3854
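/*
 * CHV interrupt enable: VLV-style display registers for the pipe and
 * port events, gen8-style GT banks, and the GEN8 master enable on top.
 */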
3855 static int cherryview_irq_postinstall(struct drm_device *dev)
3856 {
3857 struct drm_i915_private *dev_priv = dev->dev_private;
3858 u32 enable_mask = I915_DISPLAY_PORT_INTERRUPT |
3859 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3860 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3861 I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3862 u32 pipestat_enable = PLANE_FLIP_DONE_INT_STATUS_VLV |
3863 PIPE_CRC_DONE_INTERRUPT_STATUS;
3864 int pipe;
3865
3866 /*
3867 * Leave vblank interrupts masked initially. enable/disable will
3868 * toggle them based on usage.
3869 */
3870 dev_priv->irq_mask = ~enable_mask;
3871
3872 for_each_pipe(dev_priv, pipe)
3873 I915_WRITE(PIPESTAT(pipe), 0xffff);
3874
3875 spin_lock_irq(&dev_priv->irq_lock);
3876 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3877 for_each_pipe(dev_priv, pipe)
3878 i915_enable_pipestat(dev_priv, pipe, pipestat_enable);
3879 spin_unlock_irq(&dev_priv->irq_lock);
3880
3881 I915_WRITE(VLV_IIR, 0xffffffff);
3882 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3883 I915_WRITE(VLV_IER, enable_mask);
3884
3885 gen8_gt_irq_postinstall(dev_priv);
3886
3887 I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
3888 POSTING_READ(GEN8_MASTER_IRQ);
3889
3890 return 0;
3891 }
3892
3893 static void gen8_irq_uninstall(struct drm_device *dev)
3894 {
3895 struct drm_i915_private *dev_priv = dev->dev_private;
3896
3897 if (!dev_priv)
3898 return;
3899
3900 gen8_irq_reset(dev);
3901 }
3902
3903 static void valleyview_irq_uninstall(struct drm_device *dev)
3904 {
3905 struct drm_i915_private *dev_priv = dev->dev_private;
3906 int pipe;
3907
3908 if (!dev_priv)
3909 return;
3910
3911 I915_WRITE(VLV_MASTER_IER, 0);
3912
3913 for_each_pipe(dev_priv, pipe)
3914 I915_WRITE(PIPESTAT(pipe), 0xffff);
3915
3916 I915_WRITE(HWSTAM, 0xffffffff);
3917 I915_WRITE(PORT_HOTPLUG_EN, 0);
3918 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3919
3920 /* Interrupt setup is already guaranteed to be single-threaded, this is
3921 * just to make the assert_spin_locked check happy. */
3922 spin_lock_irq(&dev_priv->irq_lock);
3923 if (dev_priv->display_irqs_enabled)
3924 valleyview_display_irqs_uninstall(dev_priv);
3925 spin_unlock_irq(&dev_priv->irq_lock);
3926
3927 dev_priv->irq_mask = 0;
3928
3929 I915_WRITE(VLV_IIR, 0xffffffff);
3930 I915_WRITE(VLV_IMR, 0xffffffff);
3931 I915_WRITE(VLV_IER, 0x0);
3932 POSTING_READ(VLV_IER);
3933 }
3934
3935 static void cherryview_irq_uninstall(struct drm_device *dev)
3936 {
3937 struct drm_i915_private *dev_priv = dev->dev_private;
3938 int pipe;
3939
3940 if (!dev_priv)
3941 return;
3942
3943 I915_WRITE(GEN8_MASTER_IRQ, 0);
3944 POSTING_READ(GEN8_MASTER_IRQ);
3945
3946 #define GEN8_IRQ_FINI_NDX(type, which) \
3947 do { \
3948 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
3949 I915_WRITE(GEN8_##type##_IER(which), 0); \
3950 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
3951 POSTING_READ(GEN8_##type##_IIR(which)); \
3952 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
3953 } while (0)
3954
3955 #define GEN8_IRQ_FINI(type) \
3956 do { \
3957 I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
3958 I915_WRITE(GEN8_##type##_IER, 0); \
3959 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
3960 POSTING_READ(GEN8_##type##_IIR); \
3961 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
3962 } while (0)
3963
3964 GEN8_IRQ_FINI_NDX(GT, 0);
3965 GEN8_IRQ_FINI_NDX(GT, 1);
3966 GEN8_IRQ_FINI_NDX(GT, 2);
3967 GEN8_IRQ_FINI_NDX(GT, 3);
3968
3969 GEN8_IRQ_FINI(PCU);
3970
3971 #undef GEN8_IRQ_FINI
3972 #undef GEN8_IRQ_FINI_NDX
3973
3974 I915_WRITE(PORT_HOTPLUG_EN, 0);
3975 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3976
3977 for_each_pipe(dev_priv, pipe)
3978 I915_WRITE(PIPESTAT(pipe), 0xffff);
3979
3980 I915_WRITE(VLV_IMR, 0xffffffff);
3981 I915_WRITE(VLV_IER, 0x0);
3982 I915_WRITE(VLV_IIR, 0xffffffff);
3983 POSTING_READ(VLV_IIR);
3984 }
3985
3986 static void ironlake_irq_uninstall(struct drm_device *dev)
3987 {
3988 struct drm_i915_private *dev_priv = dev->dev_private;
3989
3990 if (!dev_priv)
3991 return;
3992
3993 ironlake_irq_reset(dev);
3994 }
3995
3996 static void i8xx_irq_preinstall(struct drm_device * dev)
3997 {
3998 struct drm_i915_private *dev_priv = dev->dev_private;
3999 int pipe;
4000
4001 for_each_pipe(dev_priv, pipe)
4002 I915_WRITE(PIPESTAT(pipe), 0);
4003 I915_WRITE16(IMR, 0xffff);
4004 I915_WRITE16(IER, 0x0);
4005 POSTING_READ16(IER);
4006 }
4007
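/*
 * Gen2 interrupt enable: 16-bit IMR/IER, error reporting restricted to
 * page table and memory refresh errors, and CRC-done pipestat events on
 * both pipes.
 */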
4008 static int i8xx_irq_postinstall(struct drm_device *dev)
4009 {
4010 struct drm_i915_private *dev_priv = dev->dev_private;
4011
4012 I915_WRITE16(EMR,
4013 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
4014
4015 /* Unmask the interrupts that we always want on. */
4016 dev_priv->irq_mask =
4017 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4018 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4019 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4020 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4021 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4022 I915_WRITE16(IMR, dev_priv->irq_mask);
4023
4024 I915_WRITE16(IER,
4025 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4026 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4027 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
4028 I915_USER_INTERRUPT);
4029 POSTING_READ16(IER);
4030
4031 /* Interrupt setup is already guaranteed to be single-threaded, this is
4032 * just to make the assert_spin_locked check happy. */
4033 spin_lock_irq(&dev_priv->irq_lock);
4034 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4035 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4036 spin_unlock_irq(&dev_priv->irq_lock);
4037
4038 return 0;
4039 }
4040
4041 /*
4042 * Returns true when a page flip has completed.
4043 */
4044 static bool i8xx_handle_vblank(struct drm_device *dev,
4045 int plane, int pipe, u32 iir)
4046 {
4047 struct drm_i915_private *dev_priv = dev->dev_private;
4048 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
4049
4050 if (!intel_pipe_handle_vblank(dev, pipe))
4051 return false;
4052
4053 if ((iir & flip_pending) == 0)
4054 goto check_page_flip;
4055
4056 intel_prepare_page_flip(dev, plane);
4057
4058 /* We detect FlipDone by looking for the change in PendingFlip from '1'
4059 * to '0' on the following vblank, i.e. IIR has the PendingFlip
4060 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
4061 * the flip is completed (no longer pending). Since this doesn't raise
4062 * an interrupt per se, we watch for the change at vblank.
4063 */
4064 if (I915_READ16(ISR) & flip_pending)
4065 goto check_page_flip;
4066
4067 intel_finish_page_flip(dev, pipe);
4068 return true;
4069
4070 check_page_flip:
4071 intel_check_page_flip(dev, pipe);
4072 return false;
4073 }
4074
4075 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
4076 {
4077 struct drm_device *dev = arg;
4078 struct drm_i915_private *dev_priv = dev->dev_private;
4079 u16 iir, new_iir;
4080 u32 pipe_stats[2];
4081 int pipe;
4082 u16 flip_mask =
4083 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4084 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4085
4086 iir = I915_READ16(IIR);
4087 if (iir == 0)
4088 return IRQ_NONE;
4089
4090 while (iir & ~flip_mask) {
4091 /* Can't rely on pipestat interrupt bit in iir as it might
4092 * have been cleared after the pipestat interrupt was received.
4093 * It doesn't set the bit in iir again, but it still produces
4094 * interrupts (for non-MSI).
4095 */
4096 spin_lock(&dev_priv->irq_lock);
4097 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4098 i915_handle_error(dev, false,
4099 "Command parser error, iir 0x%08x",
4100 iir);
4101
4102 for_each_pipe(dev_priv, pipe) {
4103 int reg = PIPESTAT(pipe);
4104 pipe_stats[pipe] = I915_READ(reg);
4105
4106 /*
4107 * Clear the PIPE*STAT regs before the IIR
4108 */
4109 if (pipe_stats[pipe] & 0x8000ffff)
4110 I915_WRITE(reg, pipe_stats[pipe]);
4111 }
4112 spin_unlock(&dev_priv->irq_lock);
4113
4114 I915_WRITE16(IIR, iir & ~flip_mask);
4115 new_iir = I915_READ16(IIR); /* Flush posted writes */
4116
4117 i915_update_dri1_breadcrumb(dev);
4118
4119 if (iir & I915_USER_INTERRUPT)
4120 notify_ring(dev, &dev_priv->ring[RCS]);
4121
4122 for_each_pipe(dev_priv, pipe) {
4123 int plane = pipe;
4124 if (HAS_FBC(dev))
4125 plane = !plane;
4126
4127 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4128 i8xx_handle_vblank(dev, plane, pipe, iir))
4129 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4130
4131 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4132 i9xx_pipe_crc_irq_handler(dev, pipe);
4133
4134 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
4135 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
4136 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
4137 }
4138
4139 iir = new_iir;
4140 }
4141
4142 return IRQ_HANDLED;
4143 }
4144
4145 static void i8xx_irq_uninstall(struct drm_device * dev)
4146 {
4147 struct drm_i915_private *dev_priv = dev->dev_private;
4148 int pipe;
4149
4150 for_each_pipe(dev_priv, pipe) {
4151 /* Clear enable bits; then clear status bits */
4152 I915_WRITE(PIPESTAT(pipe), 0);
4153 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4154 }
4155 I915_WRITE16(IMR, 0xffff);
4156 I915_WRITE16(IER, 0x0);
4157 I915_WRITE16(IIR, I915_READ16(IIR));
4158 }
4159
4160 static void i915_irq_preinstall(struct drm_device * dev)
4161 {
4162 struct drm_i915_private *dev_priv = dev->dev_private;
4163 int pipe;
4164
4165 if (I915_HAS_HOTPLUG(dev)) {
4166 I915_WRITE(PORT_HOTPLUG_EN, 0);
4167 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4168 }
4169
4170 I915_WRITE16(HWSTAM, 0xeffe);
4171 for_each_pipe(dev_priv, pipe)
4172 I915_WRITE(PIPESTAT(pipe), 0);
4173 I915_WRITE(IMR, 0xffffffff);
4174 I915_WRITE(IER, 0x0);
4175 POSTING_READ(IER);
4176 }
4177
4178 static int i915_irq_postinstall(struct drm_device *dev)
4179 {
4180 struct drm_i915_private *dev_priv = dev->dev_private;
4181 u32 enable_mask;
4182
4183 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
4184
4185 /* Unmask the interrupts that we always want on. */
4186 dev_priv->irq_mask =
4187 ~(I915_ASLE_INTERRUPT |
4188 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4189 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4190 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4191 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4192 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4193
4194 enable_mask =
4195 I915_ASLE_INTERRUPT |
4196 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4197 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4198 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
4199 I915_USER_INTERRUPT;
4200
4201 if (I915_HAS_HOTPLUG(dev)) {
4202 I915_WRITE(PORT_HOTPLUG_EN, 0);
4203 POSTING_READ(PORT_HOTPLUG_EN);
4204
4205 /* Enable in IER... */
4206 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
4207 /* and unmask in IMR */
4208 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
4209 }
4210
4211 I915_WRITE(IMR, dev_priv->irq_mask);
4212 I915_WRITE(IER, enable_mask);
4213 POSTING_READ(IER);
4214
4215 i915_enable_asle_pipestat(dev);
4216
4217 /* Interrupt setup is already guaranteed to be single-threaded, this is
4218 * just to make the assert_spin_locked check happy. */
4219 spin_lock_irq(&dev_priv->irq_lock);
4220 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4221 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4222 spin_unlock_irq(&dev_priv->irq_lock);
4223
4224 return 0;
4225 }
4226
4227 /*
4228 * Returns true when a page flip has completed.
4229 */
4230 static bool i915_handle_vblank(struct drm_device *dev,
4231 int plane, int pipe, u32 iir)
4232 {
4233 struct drm_i915_private *dev_priv = dev->dev_private;
4234 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
4235
4236 if (!intel_pipe_handle_vblank(dev, pipe))
4237 return false;
4238
4239 if ((iir & flip_pending) == 0)
4240 goto check_page_flip;
4241
4242 intel_prepare_page_flip(dev, plane);
4243
4244 /* We detect FlipDone by looking for the change in PendingFlip from '1'
4245 * to '0' on the following vblank, i.e. IIR has the PendingFlip
4246 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
4247 * the flip is completed (no longer pending). Since this doesn't raise
4248 * an interrupt per se, we watch for the change at vblank.
4249 */
4250 if (I915_READ(ISR) & flip_pending)
4251 goto check_page_flip;
4252
4253 intel_finish_page_flip(dev, pipe);
4254 return true;
4255
4256 check_page_flip:
4257 intel_check_page_flip(dev, pipe);
4258 return false;
4259 }
4260
4261 static irqreturn_t i915_irq_handler(int irq, void *arg)
4262 {
4263 struct drm_device *dev = arg;
4264 struct drm_i915_private *dev_priv = dev->dev_private;
4265 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
4266 u32 flip_mask =
4267 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4268 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4269 int pipe, ret = IRQ_NONE;
4270
4271 iir = I915_READ(IIR);
4272 do {
4273 bool irq_received = (iir & ~flip_mask) != 0;
4274 bool blc_event = false;
4275
4276 /* Can't rely on pipestat interrupt bit in iir as it might
4277 * have been cleared after the pipestat interrupt was received.
4278 * It doesn't set the bit in iir again, but it still produces
4279 * interrupts (for non-MSI).
4280 */
4281 spin_lock(&dev_priv->irq_lock);
4282 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4283 i915_handle_error(dev, false,
4284 "Command parser error, iir 0x%08x",
4285 iir);
4286
4287 for_each_pipe(dev_priv, pipe) {
4288 int reg = PIPESTAT(pipe);
4289 pipe_stats[pipe] = I915_READ(reg);
4290
4291 /* Clear the PIPE*STAT regs before the IIR */
4292 if (pipe_stats[pipe] & 0x8000ffff) {
4293 I915_WRITE(reg, pipe_stats[pipe]);
4294 irq_received = true;
4295 }
4296 }
4297 spin_unlock(&dev_priv->irq_lock);
4298
4299 if (!irq_received)
4300 break;
4301
4302 /* Consume port. Then clear IIR or we'll miss events */
4303 if (I915_HAS_HOTPLUG(dev) &&
4304 iir & I915_DISPLAY_PORT_INTERRUPT)
4305 i9xx_hpd_irq_handler(dev);
4306
4307 I915_WRITE(IIR, iir & ~flip_mask);
4308 new_iir = I915_READ(IIR); /* Flush posted writes */
4309
4310 if (iir & I915_USER_INTERRUPT)
4311 notify_ring(dev, &dev_priv->ring[RCS]);
4312
4313 for_each_pipe(dev_priv, pipe) {
4314 int plane = pipe;
4315 if (HAS_FBC(dev))
4316 plane = !plane;
4317
4318 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4319 i915_handle_vblank(dev, plane, pipe, iir))
4320 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4321
4322 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4323 blc_event = true;
4324
4325 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4326 i9xx_pipe_crc_irq_handler(dev, pipe);
4327
4328 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
4329 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
4330 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
4331 }
4332
4333 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4334 intel_opregion_asle_intr(dev);
4335
4336 /* With MSI, interrupts are only generated when iir
4337 * transitions from zero to nonzero. If another bit got
4338 * set while we were handling the existing iir bits, then
4339 * we would never get another interrupt.
4340 *
4341 * This is fine on non-MSI as well, as if we hit this path
4342 * we avoid exiting the interrupt handler only to generate
4343 * another one.
4344 *
4345 * Note that for MSI this could cause a stray interrupt report
4346 * if an interrupt landed in the time between writing IIR and
4347 * the posting read. This should be rare enough to never
4348 * trigger the 99% of 100,000 interrupts test for disabling
4349 * stray interrupts.
4350 */
4351 ret = IRQ_HANDLED;
4352 iir = new_iir;
4353 } while (iir & ~flip_mask);
4354
4355 i915_update_dri1_breadcrumb(dev);
4356
4357 return ret;
4358 }
4359
4360 static void i915_irq_uninstall(struct drm_device * dev)
4361 {
4362 struct drm_i915_private *dev_priv = dev->dev_private;
4363 int pipe;
4364
4365 if (I915_HAS_HOTPLUG(dev)) {
4366 I915_WRITE(PORT_HOTPLUG_EN, 0);
4367 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4368 }
4369
4370 I915_WRITE16(HWSTAM, 0xffff);
4371 for_each_pipe(dev_priv, pipe) {
4372 /* Clear enable bits; then clear status bits */
4373 I915_WRITE(PIPESTAT(pipe), 0);
4374 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4375 }
4376 I915_WRITE(IMR, 0xffffffff);
4377 I915_WRITE(IER, 0x0);
4378
4379 I915_WRITE(IIR, I915_READ(IIR));
4380 }
4381
4382 static void i965_irq_preinstall(struct drm_device * dev)
4383 {
4384 struct drm_i915_private *dev_priv = dev->dev_private;
4385 int pipe;
4386
4387 I915_WRITE(PORT_HOTPLUG_EN, 0);
4388 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4389
4390 I915_WRITE(HWSTAM, 0xeffe);
4391 for_each_pipe(dev_priv, pipe)
4392 I915_WRITE(PIPESTAT(pipe), 0);
4393 I915_WRITE(IMR, 0xffffffff);
4394 I915_WRITE(IER, 0x0);
4395 POSTING_READ(IER);
4396 }
4397
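/*
 * Gen4 interrupt enable: display port, pipe event and user interrupts
 * (plus BSD on G4X), GMBUS and CRC pipestat events, and a wider error
 * mask on G4X parts.
 */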
4398 static int i965_irq_postinstall(struct drm_device *dev)
4399 {
4400 struct drm_i915_private *dev_priv = dev->dev_private;
4401 u32 enable_mask;
4402 u32 error_mask;
4403
4404 /* Unmask the interrupts that we always want on. */
4405 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
4406 I915_DISPLAY_PORT_INTERRUPT |
4407 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4408 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4409 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4410 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4411 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4412
4413 enable_mask = ~dev_priv->irq_mask;
4414 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4415 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
4416 enable_mask |= I915_USER_INTERRUPT;
4417
4418 if (IS_G4X(dev))
4419 enable_mask |= I915_BSD_USER_INTERRUPT;
4420
4421 /* Interrupt setup is already guaranteed to be single-threaded, this is
4422 * just to make the assert_spin_locked check happy. */
4423 spin_lock_irq(&dev_priv->irq_lock);
4424 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4425 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4426 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4427 spin_unlock_irq(&dev_priv->irq_lock);
4428
4429 /*
4430 * Enable some error detection, note the instruction error mask
4431 * bit is reserved, so we leave it masked.
4432 */
4433 if (IS_G4X(dev)) {
4434 error_mask = ~(GM45_ERROR_PAGE_TABLE |
4435 GM45_ERROR_MEM_PRIV |
4436 GM45_ERROR_CP_PRIV |
4437 I915_ERROR_MEMORY_REFRESH);
4438 } else {
4439 error_mask = ~(I915_ERROR_PAGE_TABLE |
4440 I915_ERROR_MEMORY_REFRESH);
4441 }
4442 I915_WRITE(EMR, error_mask);
4443
4444 I915_WRITE(IMR, dev_priv->irq_mask);
4445 I915_WRITE(IER, enable_mask);
4446 POSTING_READ(IER);
4447
4448 I915_WRITE(PORT_HOTPLUG_EN, 0);
4449 POSTING_READ(PORT_HOTPLUG_EN);
4450
4451 i915_enable_asle_pipestat(dev);
4452
4453 return 0;
4454 }
4455
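/*
 * Program PORT_HOTPLUG_EN on gmch platforms: enable the hotplug bit for
 * every encoder whose pin is marked HPD_ENABLED and set the CRT
 * detection period/voltage parameters. Caller must hold irq_lock.
 */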
4456 static void i915_hpd_irq_setup(struct drm_device *dev)
4457 {
4458 struct drm_i915_private *dev_priv = dev->dev_private;
4459 struct intel_encoder *intel_encoder;
4460 u32 hotplug_en;
4461
4462 assert_spin_locked(&dev_priv->irq_lock);
4463
4464 if (I915_HAS_HOTPLUG(dev)) {
4465 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
4466 hotplug_en &= ~HOTPLUG_INT_EN_MASK;
4467 /* Note HDMI and DP share hotplug bits */
4468 /* enable bits are the same for all generations */
4469 for_each_intel_encoder(dev, intel_encoder)
4470 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
4471 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
4472 /* Programming the CRT detection parameters tends
4473 to generate a spurious hotplug event about three
4474 seconds later. So just do it once.
4475 */
4476 if (IS_G4X(dev))
4477 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4478 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
4479 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4480
4481 /* Ignore TV since it's buggy */
4482 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
4483 }
4484 }
4485
4486 static irqreturn_t i965_irq_handler(int irq, void *arg)
4487 {
4488 struct drm_device *dev = arg;
4489 struct drm_i915_private *dev_priv = dev->dev_private;
4490 u32 iir, new_iir;
4491 u32 pipe_stats[I915_MAX_PIPES];
4492 int ret = IRQ_NONE, pipe;
4493 u32 flip_mask =
4494 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4495 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4496
4497 iir = I915_READ(IIR);
4498
4499 for (;;) {
4500 bool irq_received = (iir & ~flip_mask) != 0;
4501 bool blc_event = false;
4502
4503 /* Can't rely on pipestat interrupt bit in iir as it might
4504 * have been cleared after the pipestat interrupt was received.
4505 * It doesn't set the bit in iir again, but it still produces
4506 * interrupts (for non-MSI).
4507 */
4508 spin_lock(&dev_priv->irq_lock);
4509 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4510 i915_handle_error(dev, false,
4511 "Command parser error, iir 0x%08x",
4512 iir);
4513
4514 for_each_pipe(dev_priv, pipe) {
4515 int reg = PIPESTAT(pipe);
4516 pipe_stats[pipe] = I915_READ(reg);
4517
4518 /*
4519 * Clear the PIPE*STAT regs before the IIR
4520 */
4521 if (pipe_stats[pipe] & 0x8000ffff) {
4522 I915_WRITE(reg, pipe_stats[pipe]);
4523 irq_received = true;
4524 }
4525 }
4526 spin_unlock(&dev_priv->irq_lock);
4527
4528 if (!irq_received)
4529 break;
4530
4531 ret = IRQ_HANDLED;
4532
4533 /* Consume port. Then clear IIR or we'll miss events */
4534 if (iir & I915_DISPLAY_PORT_INTERRUPT)
4535 i9xx_hpd_irq_handler(dev);
4536
4537 I915_WRITE(IIR, iir & ~flip_mask);
4538 new_iir = I915_READ(IIR); /* Flush posted writes */
4539
4540 if (iir & I915_USER_INTERRUPT)
4541 notify_ring(dev, &dev_priv->ring[RCS]);
4542 if (iir & I915_BSD_USER_INTERRUPT)
4543 notify_ring(dev, &dev_priv->ring[VCS]);
4544
4545 for_each_pipe(dev_priv, pipe) {
4546 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
4547 i915_handle_vblank(dev, pipe, pipe, iir))
4548 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
4549
4550 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4551 blc_event = true;
4552
4553 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4554 i9xx_pipe_crc_irq_handler(dev, pipe);
4555
4556 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
4557 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
4558 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
4559 }
4560
4561 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4562 intel_opregion_asle_intr(dev);
4563
4564 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
4565 gmbus_irq_handler(dev);
4566
4567 /* With MSI, interrupts are only generated when iir
4568 * transitions from zero to nonzero. If another bit got
4569 * set while we were handling the existing iir bits, then
4570 * we would never get another interrupt.
4571 *
4572 * This is fine on non-MSI as well, as if we hit this path
4573 * we avoid exiting the interrupt handler only to generate
4574 * another one.
4575 *
4576 * Note that for MSI this could cause a stray interrupt report
4577 * if an interrupt landed in the time between writing IIR and
4578 * the posting read. This should be rare enough to never
4579 * trigger the 99% of 100,000 interrupts test for disabling
4580 * stray interrupts.
4581 */
4582 iir = new_iir;
4583 }
4584
4585 i915_update_dri1_breadcrumb(dev);
4586
4587 return ret;
4588 }
4589
4590 static void i965_irq_uninstall(struct drm_device * dev)
4591 {
4592 struct drm_i915_private *dev_priv = dev->dev_private;
4593 int pipe;
4594
4595 if (!dev_priv)
4596 return;
4597
4598 I915_WRITE(PORT_HOTPLUG_EN, 0);
4599 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4600
4601 I915_WRITE(HWSTAM, 0xffffffff);
4602 for_each_pipe(dev_priv, pipe)
4603 I915_WRITE(PIPESTAT(pipe), 0);
4604 I915_WRITE(IMR, 0xffffffff);
4605 I915_WRITE(IER, 0x0);
4606
4607 for_each_pipe(dev_priv, pipe)
4608 I915_WRITE(PIPESTAT(pipe),
4609 I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
4610 I915_WRITE(IIR, I915_READ(IIR));
4611 }
4612
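/*
 * Delayed work that re-enables HPD pins which were disabled (e.g. after
 * an interrupt storm): mark them HPD_ENABLED again, restore each
 * affected connector's polled mode and reprogram the hotplug hardware,
 * holding a runtime PM reference while doing so.
 */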
4613 static void intel_hpd_irq_reenable_work(struct work_struct *work)
4614 {
4615 struct drm_i915_private *dev_priv =
4616 container_of(work, typeof(*dev_priv),
4617 hotplug_reenable_work.work);
4618 struct drm_device *dev = dev_priv->dev;
4619 struct drm_mode_config *mode_config = &dev->mode_config;
4620 int i;
4621
4622 intel_runtime_pm_get(dev_priv);
4623
4624 spin_lock_irq(&dev_priv->irq_lock);
4625 for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
4626 struct drm_connector *connector;
4627
4628 if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
4629 continue;
4630
4631 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
4632
4633 list_for_each_entry(connector, &mode_config->connector_list, head) {
4634 struct intel_connector *intel_connector = to_intel_connector(connector);
4635
4636 if (intel_connector->encoder->hpd_pin == i) {
4637 if (connector->polled != intel_connector->polled)
4638 DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
4639 connector->name);
4640 connector->polled = intel_connector->polled;
4641 if (!connector->polled)
4642 connector->polled = DRM_CONNECTOR_POLL_HPD;
4643 }
4644 }
4645 }
4646 if (dev_priv->display.hpd_irq_setup)
4647 dev_priv->display.hpd_irq_setup(dev);
4648 spin_unlock_irq(&dev_priv->irq_lock);
4649
4650 intel_runtime_pm_put(dev_priv);
4651 }
4652
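/*
 * One-time interrupt bookkeeping: initialize the work items, hangcheck
 * timer and RPS event mask, then pick the vblank counter and IRQ vtable
 * entry points appropriate for the platform.
 */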
4653 void intel_irq_init(struct drm_device *dev)
4654 {
4655 struct drm_i915_private *dev_priv = dev->dev_private;
4656
4657 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
4658 INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
4659 INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
4660 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
4661 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4662
4663 /* Let's track the enabled rps events */
4664 if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev))
4665 /* WaGsvRC0ResidencyMethod:vlv */
4666 dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
4667 else
4668 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4669
4670 setup_timer(&dev_priv->gpu_error.hangcheck_timer,
4671 i915_hangcheck_elapsed,
4672 (unsigned long) dev);
4673 INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
4674 intel_hpd_irq_reenable_work);
4675
4676 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
4677
4678 /* Haven't installed the IRQ handler yet */
4679 dev_priv->pm._irqs_disabled = true;
4680
4681 if (IS_GEN2(dev)) {
4682 dev->max_vblank_count = 0;
4683 dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
4684 } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
4685 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4686 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
4687 } else {
4688 dev->driver->get_vblank_counter = i915_get_vblank_counter;
4689 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
4690 }
4691
4692 /*
4693 * Opt out of the vblank disable timer on everything except gen2.
4694 * Gen2 doesn't have a hardware frame counter and so depends on
4695 * vblank interrupts to produce sane vblank sequence numbers.
4696 */
4697 if (!IS_GEN2(dev))
4698 dev->vblank_disable_immediate = true;
4699
4700 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
4701 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
4702 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4703 }
4704
4705 if (IS_CHERRYVIEW(dev)) {
4706 dev->driver->irq_handler = cherryview_irq_handler;
4707 dev->driver->irq_preinstall = cherryview_irq_preinstall;
4708 dev->driver->irq_postinstall = cherryview_irq_postinstall;
4709 dev->driver->irq_uninstall = cherryview_irq_uninstall;
4710 dev->driver->enable_vblank = valleyview_enable_vblank;
4711 dev->driver->disable_vblank = valleyview_disable_vblank;
4712 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4713 } else if (IS_VALLEYVIEW(dev)) {
4714 dev->driver->irq_handler = valleyview_irq_handler;
4715 dev->driver->irq_preinstall = valleyview_irq_preinstall;
4716 dev->driver->irq_postinstall = valleyview_irq_postinstall;
4717 dev->driver->irq_uninstall = valleyview_irq_uninstall;
4718 dev->driver->enable_vblank = valleyview_enable_vblank;
4719 dev->driver->disable_vblank = valleyview_disable_vblank;
4720 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4721 } else if (INTEL_INFO(dev)->gen >= 8) {
4722 dev->driver->irq_handler = gen8_irq_handler;
4723 dev->driver->irq_preinstall = gen8_irq_reset;
4724 dev->driver->irq_postinstall = gen8_irq_postinstall;
4725 dev->driver->irq_uninstall = gen8_irq_uninstall;
4726 dev->driver->enable_vblank = gen8_enable_vblank;
4727 dev->driver->disable_vblank = gen8_disable_vblank;
4728 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
4729 } else if (HAS_PCH_SPLIT(dev)) {
4730 dev->driver->irq_handler = ironlake_irq_handler;
4731 dev->driver->irq_preinstall = ironlake_irq_reset;
4732 dev->driver->irq_postinstall = ironlake_irq_postinstall;
4733 dev->driver->irq_uninstall = ironlake_irq_uninstall;
4734 dev->driver->enable_vblank = ironlake_enable_vblank;
4735 dev->driver->disable_vblank = ironlake_disable_vblank;
4736 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
4737 } else {
4738 if (INTEL_INFO(dev)->gen == 2) {
4739 dev->driver->irq_preinstall = i8xx_irq_preinstall;
4740 dev->driver->irq_postinstall = i8xx_irq_postinstall;
4741 dev->driver->irq_handler = i8xx_irq_handler;
4742 dev->driver->irq_uninstall = i8xx_irq_uninstall;
4743 } else if (INTEL_INFO(dev)->gen == 3) {
4744 dev->driver->irq_preinstall = i915_irq_preinstall;
4745 dev->driver->irq_postinstall = i915_irq_postinstall;
4746 dev->driver->irq_uninstall = i915_irq_uninstall;
4747 dev->driver->irq_handler = i915_irq_handler;
4748 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4749 } else {
4750 dev->driver->irq_preinstall = i965_irq_preinstall;
4751 dev->driver->irq_postinstall = i965_irq_postinstall;
4752 dev->driver->irq_uninstall = i965_irq_uninstall;
4753 dev->driver->irq_handler = i965_irq_handler;
4754 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4755 }
4756 dev->driver->enable_vblank = i915_enable_vblank;
4757 dev->driver->disable_vblank = i915_disable_vblank;
4758 }
4759 }
4760
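/*
 * (Re-)initialize hotplug support: reset every pin to HPD_ENABLED,
 * choose each connector's polled mode, and reprogram the platform
 * hotplug registers under irq_lock.
 */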
4761 void intel_hpd_init(struct drm_device *dev)
4762 {
4763 struct drm_i915_private *dev_priv = dev->dev_private;
4764 struct drm_mode_config *mode_config = &dev->mode_config;
4765 struct drm_connector *connector;
4766 int i;
4767
4768 for (i = 1; i < HPD_NUM_PINS; i++) {
4769 dev_priv->hpd_stats[i].hpd_cnt = 0;
4770 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
4771 }
4772 list_for_each_entry(connector, &mode_config->connector_list, head) {
4773 struct intel_connector *intel_connector = to_intel_connector(connector);
4774 connector->polled = intel_connector->polled;
4775 if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
4776 connector->polled = DRM_CONNECTOR_POLL_HPD;
4777 if (intel_connector->mst_port)
4778 connector->polled = DRM_CONNECTOR_POLL_HPD;
4779 }
4780
4781 /* Interrupt setup is already guaranteed to be single-threaded, this is
4782 * just to make the assert_spin_locked checks happy. */
4783 spin_lock_irq(&dev_priv->irq_lock);
4784 if (dev_priv->display.hpd_irq_setup)
4785 dev_priv->display.hpd_irq_setup(dev);
4786 spin_unlock_irq(&dev_priv->irq_lock);
4787 }
4788
4789 /* Disable interrupts so we can allow runtime PM. */
4790 void intel_runtime_pm_disable_interrupts(struct drm_device *dev)
4791 {
4792 struct drm_i915_private *dev_priv = dev->dev_private;
4793
4794 dev->driver->irq_uninstall(dev);
4795 dev_priv->pm._irqs_disabled = true;
4796 }
4797
4798 /* Restore interrupts so we can recover from runtime PM. */
4799 void intel_runtime_pm_restore_interrupts(struct drm_device *dev)
4800 {
4801 struct drm_i915_private *dev_priv = dev->dev_private;
4802
4803 dev_priv->pm._irqs_disabled = false;
4804 dev->driver->irq_preinstall(dev);
4805 dev->driver->irq_postinstall(dev);
4806 }