drm/i915: Clarify irq_lock locking, special cases
drivers/gpu/drm/i915/i915_irq.c
1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2 */
3 /*
4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
17 * of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 */
28
29 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
31 #include <linux/sysrq.h>
32 #include <linux/slab.h>
33 #include <linux/circ_buf.h>
34 #include <drm/drmP.h>
35 #include <drm/i915_drm.h>
36 #include "i915_drv.h"
37 #include "i915_trace.h"
38 #include "intel_drv.h"
39
40 static const u32 hpd_ibx[] = {
41 [HPD_CRT] = SDE_CRT_HOTPLUG,
42 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
43 [HPD_PORT_B] = SDE_PORTB_HOTPLUG,
44 [HPD_PORT_C] = SDE_PORTC_HOTPLUG,
45 [HPD_PORT_D] = SDE_PORTD_HOTPLUG
46 };
47
48 static const u32 hpd_cpt[] = {
49 [HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
50 [HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
51 [HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
52 [HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
53 [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
54 };
55
56 static const u32 hpd_mask_i915[] = {
57 [HPD_CRT] = CRT_HOTPLUG_INT_EN,
58 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
59 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
60 [HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
61 [HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
62 [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
63 };
64
65 static const u32 hpd_status_g4x[] = {
66 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
67 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
68 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
69 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
70 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
71 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
72 };
73
74 static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
75 [HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
76 [HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
77 [HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
78 [HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
79 [HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
80 [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
81 };
82
83 /* IIR can theoretically queue up two events. Be paranoid. */
84 #define GEN8_IRQ_RESET_NDX(type, which) do { \
85 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
86 POSTING_READ(GEN8_##type##_IMR(which)); \
87 I915_WRITE(GEN8_##type##_IER(which), 0); \
88 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
89 POSTING_READ(GEN8_##type##_IIR(which)); \
90 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
91 POSTING_READ(GEN8_##type##_IIR(which)); \
92 } while (0)
93
94 #define GEN5_IRQ_RESET(type) do { \
95 I915_WRITE(type##IMR, 0xffffffff); \
96 POSTING_READ(type##IMR); \
97 I915_WRITE(type##IER, 0); \
98 I915_WRITE(type##IIR, 0xffffffff); \
99 POSTING_READ(type##IIR); \
100 I915_WRITE(type##IIR, 0xffffffff); \
101 POSTING_READ(type##IIR); \
102 } while (0)
103
104 /*
105 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
106 */
107 #define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
108 u32 val = I915_READ(reg); \
109 if (val) { \
110 WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
111 (reg), val); \
112 I915_WRITE((reg), 0xffffffff); \
113 POSTING_READ(reg); \
114 I915_WRITE((reg), 0xffffffff); \
115 POSTING_READ(reg); \
116 } \
117 } while (0)
118
119 #define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
120 GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
121 I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
122 I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
123 POSTING_READ(GEN8_##type##_IER(which)); \
124 } while (0)
125
126 #define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
127 GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
128 I915_WRITE(type##IMR, (imr_val)); \
129 I915_WRITE(type##IER, (ier_val)); \
130 POSTING_READ(type##IER); \
131 } while (0)
132
133 /* For display hotplug interrupt */
134 static void
135 ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
136 {
137 assert_spin_locked(&dev_priv->irq_lock);
138
139 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
140 return;
141
142 if ((dev_priv->irq_mask & mask) != 0) {
143 dev_priv->irq_mask &= ~mask;
144 I915_WRITE(DEIMR, dev_priv->irq_mask);
145 POSTING_READ(DEIMR);
146 }
147 }
148
149 static void
150 ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
151 {
152 assert_spin_locked(&dev_priv->irq_lock);
153
154 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
155 return;
156
157 if ((dev_priv->irq_mask & mask) != mask) {
158 dev_priv->irq_mask |= mask;
159 I915_WRITE(DEIMR, dev_priv->irq_mask);
160 POSTING_READ(DEIMR);
161 }
162 }
163
164 /**
165 * ilk_update_gt_irq - update GTIMR
166 * @dev_priv: driver private
167 * @interrupt_mask: mask of interrupt bits to update
168 * @enabled_irq_mask: mask of interrupt bits to enable
169 */
170 static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
171 uint32_t interrupt_mask,
172 uint32_t enabled_irq_mask)
173 {
174 assert_spin_locked(&dev_priv->irq_lock);
175
176 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
177 return;
178
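/*
 * GTIMR semantics: a set bit masks (disables) that interrupt. Clear the
 * bits selected by interrupt_mask, then set again the ones that are not
 * in enabled_irq_mask so they stay masked.
 */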
179 dev_priv->gt_irq_mask &= ~interrupt_mask;
180 dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
181 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
182 POSTING_READ(GTIMR);
183 }
184
185 void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
186 {
187 ilk_update_gt_irq(dev_priv, mask, mask);
188 }
189
190 void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
191 {
192 ilk_update_gt_irq(dev_priv, mask, 0);
193 }
194
195 /**
196 * snb_update_pm_irq - update GEN6_PMIMR
197 * @dev_priv: driver private
198 * @interrupt_mask: mask of interrupt bits to update
199 * @enabled_irq_mask: mask of interrupt bits to enable
200 */
201 static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
202 uint32_t interrupt_mask,
203 uint32_t enabled_irq_mask)
204 {
205 uint32_t new_val;
206
207 assert_spin_locked(&dev_priv->irq_lock);
208
209 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
210 return;
211
212 new_val = dev_priv->pm_irq_mask;
213 new_val &= ~interrupt_mask;
214 new_val |= (~enabled_irq_mask & interrupt_mask);
215
216 if (new_val != dev_priv->pm_irq_mask) {
217 dev_priv->pm_irq_mask = new_val;
218 I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
219 POSTING_READ(GEN6_PMIMR);
220 }
221 }
222
223 void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
224 {
225 snb_update_pm_irq(dev_priv, mask, mask);
226 }
227
228 void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
229 {
230 snb_update_pm_irq(dev_priv, mask, 0);
231 }
232
233 static bool ivb_can_enable_err_int(struct drm_device *dev)
234 {
235 struct drm_i915_private *dev_priv = dev->dev_private;
236 struct intel_crtc *crtc;
237 enum pipe pipe;
238
239 assert_spin_locked(&dev_priv->irq_lock);
240
241 for_each_pipe(dev_priv, pipe) {
242 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
243
244 if (crtc->cpu_fifo_underrun_disabled)
245 return false;
246 }
247
248 return true;
249 }
250
251 /**
252 * bdw_update_pm_irq - update GT interrupt 2
253 * @dev_priv: driver private
254 * @interrupt_mask: mask of interrupt bits to update
255 * @enabled_irq_mask: mask of interrupt bits to enable
256 *
257 * Copied from the snb function, updated with relevant register offsets
258 */
259 static void bdw_update_pm_irq(struct drm_i915_private *dev_priv,
260 uint32_t interrupt_mask,
261 uint32_t enabled_irq_mask)
262 {
263 uint32_t new_val;
264
265 assert_spin_locked(&dev_priv->irq_lock);
266
267 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
268 return;
269
270 new_val = dev_priv->pm_irq_mask;
271 new_val &= ~interrupt_mask;
272 new_val |= (~enabled_irq_mask & interrupt_mask);
273
274 if (new_val != dev_priv->pm_irq_mask) {
275 dev_priv->pm_irq_mask = new_val;
276 I915_WRITE(GEN8_GT_IMR(2), dev_priv->pm_irq_mask);
277 POSTING_READ(GEN8_GT_IMR(2));
278 }
279 }
280
281 void gen8_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
282 {
283 bdw_update_pm_irq(dev_priv, mask, mask);
284 }
285
286 void gen8_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
287 {
288 bdw_update_pm_irq(dev_priv, mask, 0);
289 }
290
291 static bool cpt_can_enable_serr_int(struct drm_device *dev)
292 {
293 struct drm_i915_private *dev_priv = dev->dev_private;
294 enum pipe pipe;
295 struct intel_crtc *crtc;
296
297 assert_spin_locked(&dev_priv->irq_lock);
298
299 for_each_pipe(dev_priv, pipe) {
300 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
301
302 if (crtc->pch_fifo_underrun_disabled)
303 return false;
304 }
305
306 return true;
307 }
308
309 void i9xx_check_fifo_underruns(struct drm_device *dev)
310 {
311 struct drm_i915_private *dev_priv = dev->dev_private;
312 struct intel_crtc *crtc;
313
314 spin_lock_irq(&dev_priv->irq_lock);
315
316 for_each_intel_crtc(dev, crtc) {
317 u32 reg = PIPESTAT(crtc->pipe);
318 u32 pipestat;
319
320 if (crtc->cpu_fifo_underrun_disabled)
321 continue;
322
323 pipestat = I915_READ(reg) & 0xffff0000;
324 if ((pipestat & PIPE_FIFO_UNDERRUN_STATUS) == 0)
325 continue;
326
327 I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
328 POSTING_READ(reg);
329
330 DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
331 }
332
333 spin_unlock_irq(&dev_priv->irq_lock);
334 }
335
336 static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
337 enum pipe pipe,
338 bool enable, bool old)
339 {
340 struct drm_i915_private *dev_priv = dev->dev_private;
341 u32 reg = PIPESTAT(pipe);
342 u32 pipestat = I915_READ(reg) & 0xffff0000;
343
344 assert_spin_locked(&dev_priv->irq_lock);
345
346 if (enable) {
347 I915_WRITE(reg, pipestat | PIPE_FIFO_UNDERRUN_STATUS);
348 POSTING_READ(reg);
349 } else {
350 if (old && pipestat & PIPE_FIFO_UNDERRUN_STATUS)
351 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
352 }
353 }
354
355 static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
356 enum pipe pipe, bool enable)
357 {
358 struct drm_i915_private *dev_priv = dev->dev_private;
359 uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
360 DE_PIPEB_FIFO_UNDERRUN;
361
362 if (enable)
363 ironlake_enable_display_irq(dev_priv, bit);
364 else
365 ironlake_disable_display_irq(dev_priv, bit);
366 }
367
368 static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
369 enum pipe pipe,
370 bool enable, bool old)
371 {
372 struct drm_i915_private *dev_priv = dev->dev_private;
373 if (enable) {
374 I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
375
376 if (!ivb_can_enable_err_int(dev))
377 return;
378
379 ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
380 } else {
381 ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
382
383 if (old &&
384 I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
385 DRM_ERROR("uncleared fifo underrun on pipe %c\n",
386 pipe_name(pipe));
387 }
388 }
389 }
390
391 static void broadwell_set_fifo_underrun_reporting(struct drm_device *dev,
392 enum pipe pipe, bool enable)
393 {
394 struct drm_i915_private *dev_priv = dev->dev_private;
395
396 assert_spin_locked(&dev_priv->irq_lock);
397
398 if (enable)
399 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_FIFO_UNDERRUN;
400 else
401 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_FIFO_UNDERRUN;
402 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
403 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
404 }
405
406 /**
407 * ibx_display_interrupt_update - update SDEIMR
408 * @dev_priv: driver private
409 * @interrupt_mask: mask of interrupt bits to update
410 * @enabled_irq_mask: mask of interrupt bits to enable
411 */
412 static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
413 uint32_t interrupt_mask,
414 uint32_t enabled_irq_mask)
415 {
416 uint32_t sdeimr = I915_READ(SDEIMR);
417 sdeimr &= ~interrupt_mask;
418 sdeimr |= (~enabled_irq_mask & interrupt_mask);
419
420 assert_spin_locked(&dev_priv->irq_lock);
421
422 if (WARN_ON(!intel_irqs_enabled(dev_priv)))
423 return;
424
425 I915_WRITE(SDEIMR, sdeimr);
426 POSTING_READ(SDEIMR);
427 }
428 #define ibx_enable_display_interrupt(dev_priv, bits) \
429 ibx_display_interrupt_update((dev_priv), (bits), (bits))
430 #define ibx_disable_display_interrupt(dev_priv, bits) \
431 ibx_display_interrupt_update((dev_priv), (bits), 0)
432
433 static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
434 enum transcoder pch_transcoder,
435 bool enable)
436 {
437 struct drm_i915_private *dev_priv = dev->dev_private;
438 uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
439 SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;
440
441 if (enable)
442 ibx_enable_display_interrupt(dev_priv, bit);
443 else
444 ibx_disable_display_interrupt(dev_priv, bit);
445 }
446
447 static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
448 enum transcoder pch_transcoder,
449 bool enable, bool old)
450 {
451 struct drm_i915_private *dev_priv = dev->dev_private;
452
453 if (enable) {
454 I915_WRITE(SERR_INT,
455 SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
456
457 if (!cpt_can_enable_serr_int(dev))
458 return;
459
460 ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
461 } else {
462 ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
463
464 if (old && I915_READ(SERR_INT) &
465 SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
466 DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
467 transcoder_name(pch_transcoder));
468 }
469 }
470 }
471
472 /**
473 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
474 * @dev: drm device
475 * @pipe: pipe
476 * @enable: true if we want to report FIFO underrun errors, false otherwise
477 *
478 * This function makes us disable or enable CPU fifo underruns for a specific
479 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
480 * reporting for one pipe may also disable all the other CPU error interrupts for
481 * the other pipes, because there is just one interrupt mask/enable
482 * bit for all the pipes.
483 *
484 * Returns the previous state of underrun reporting.
485 */
486 static bool __intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
487 enum pipe pipe, bool enable)
488 {
489 struct drm_i915_private *dev_priv = dev->dev_private;
490 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
491 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
492 bool old;
493
494 assert_spin_locked(&dev_priv->irq_lock);
495
496 old = !intel_crtc->cpu_fifo_underrun_disabled;
497 intel_crtc->cpu_fifo_underrun_disabled = !enable;
498
499 if (HAS_GMCH_DISPLAY(dev))
500 i9xx_set_fifo_underrun_reporting(dev, pipe, enable, old);
501 else if (IS_GEN5(dev) || IS_GEN6(dev))
502 ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
503 else if (IS_GEN7(dev))
504 ivybridge_set_fifo_underrun_reporting(dev, pipe, enable, old);
505 else if (IS_GEN8(dev))
506 broadwell_set_fifo_underrun_reporting(dev, pipe, enable);
507
508 return old;
509 }
510
511 bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
512 enum pipe pipe, bool enable)
513 {
514 struct drm_i915_private *dev_priv = dev->dev_private;
515 unsigned long flags;
516 bool ret;
517
518 spin_lock_irqsave(&dev_priv->irq_lock, flags);
519 ret = __intel_set_cpu_fifo_underrun_reporting(dev, pipe, enable);
520 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
521
522 return ret;
523 }
524
525 static bool __cpu_fifo_underrun_reporting_enabled(struct drm_device *dev,
526 enum pipe pipe)
527 {
528 struct drm_i915_private *dev_priv = dev->dev_private;
529 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
530 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
531
532 return !intel_crtc->cpu_fifo_underrun_disabled;
533 }
534
535 /**
536 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
537 * @dev: drm device
538 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
539 * @enable: true if we want to report FIFO underrun errors, false otherwise
540 *
541 * This function makes us disable or enable PCH fifo underruns for a specific
542 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
543 * underrun reporting for one transcoder may also disable all the other PCH
544 * error interrupts for the other transcoders, because there is just
545 * one interrupt mask/enable bit for all the transcoders.
546 *
547 * Returns the previous state of underrun reporting.
548 */
549 bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
550 enum transcoder pch_transcoder,
551 bool enable)
552 {
553 struct drm_i915_private *dev_priv = dev->dev_private;
554 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
555 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
556 unsigned long flags;
557 bool old;
558
559 /*
560 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
561 * has only one pch transcoder A that all pipes can use. To avoid racy
562 * pch transcoder -> pipe lookups from interrupt code simply store the
563 * underrun statistics in crtc A. Since we never expose this anywhere
564 * nor use it outside of the fifo underrun code here using the "wrong"
565 * crtc on LPT won't cause issues.
566 */
567
568 spin_lock_irqsave(&dev_priv->irq_lock, flags);
569
570 old = !intel_crtc->pch_fifo_underrun_disabled;
571 intel_crtc->pch_fifo_underrun_disabled = !enable;
572
573 if (HAS_PCH_IBX(dev))
574 ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
575 else
576 cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable, old);
577
578 spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
579 return old;
580 }
581
582
583 static void
584 __i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
585 u32 enable_mask, u32 status_mask)
586 {
587 u32 reg = PIPESTAT(pipe);
588 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
589
590 assert_spin_locked(&dev_priv->irq_lock);
591 WARN_ON(!intel_irqs_enabled(dev_priv));
592
593 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
594 status_mask & ~PIPESTAT_INT_STATUS_MASK,
595 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
596 pipe_name(pipe), enable_mask, status_mask))
597 return;
598
599 if ((pipestat & enable_mask) == enable_mask)
600 return;
601
602 dev_priv->pipestat_irq_mask[pipe] |= status_mask;
603
604 /* Enable the interrupt, clear any pending status */
605 pipestat |= enable_mask | status_mask;
606 I915_WRITE(reg, pipestat);
607 POSTING_READ(reg);
608 }
609
610 static void
611 __i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
612 u32 enable_mask, u32 status_mask)
613 {
614 u32 reg = PIPESTAT(pipe);
615 u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;
616
617 assert_spin_locked(&dev_priv->irq_lock);
618 WARN_ON(!intel_irqs_enabled(dev_priv));
619
620 if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
621 status_mask & ~PIPESTAT_INT_STATUS_MASK,
622 "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
623 pipe_name(pipe), enable_mask, status_mask))
624 return;
625
626 if ((pipestat & enable_mask) == 0)
627 return;
628
629 dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;
630
631 pipestat &= ~enable_mask;
632 I915_WRITE(reg, pipestat);
633 POSTING_READ(reg);
634 }
635
636 static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
637 {
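/*
 * The enable bit for most PIPESTAT status bits sits 16 bits above the
 * status bit, hence the shift. Bits that don't follow that layout (FIFO
 * underrun, the VLV sprite flip done bits) are fixed up below.
 */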
638 u32 enable_mask = status_mask << 16;
639
640 /*
641 * On pipe A we don't support the PSR interrupt yet,
642 * on pipe B and C the same bit MBZ.
643 */
644 if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
645 return 0;
646 /*
647 * On pipe B and C we don't support the PSR interrupt yet, on pipe
648 * A the same bit is for perf counters which we don't use either.
649 */
650 if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
651 return 0;
652
653 enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
654 SPRITE0_FLIP_DONE_INT_EN_VLV |
655 SPRITE1_FLIP_DONE_INT_EN_VLV);
656 if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
657 enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
658 if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
659 enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
660
661 return enable_mask;
662 }
663
664 void
665 i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
666 u32 status_mask)
667 {
668 u32 enable_mask;
669
670 if (IS_VALLEYVIEW(dev_priv->dev))
671 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
672 status_mask);
673 else
674 enable_mask = status_mask << 16;
675 __i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
676 }
677
678 void
679 i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
680 u32 status_mask)
681 {
682 u32 enable_mask;
683
684 if (IS_VALLEYVIEW(dev_priv->dev))
685 enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
686 status_mask);
687 else
688 enable_mask = status_mask << 16;
689 __i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
690 }
691
692 /**
693 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
694 */
695 static void i915_enable_asle_pipestat(struct drm_device *dev)
696 {
697 struct drm_i915_private *dev_priv = dev->dev_private;
698
699 if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
700 return;
701
702 spin_lock_irq(&dev_priv->irq_lock);
703
704 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
705 if (INTEL_INFO(dev)->gen >= 4)
706 i915_enable_pipestat(dev_priv, PIPE_A,
707 PIPE_LEGACY_BLC_EVENT_STATUS);
708
709 spin_unlock_irq(&dev_priv->irq_lock);
710 }
711
712 /**
713 * i915_pipe_enabled - check if a pipe is enabled
714 * @dev: DRM device
715 * @pipe: pipe to check
716 *
717 * Reading certain registers when the pipe is disabled can hang the chip.
718 * Use this routine to make sure the PLL is running and the pipe is active
719 * before reading such registers if unsure.
720 */
721 static int
722 i915_pipe_enabled(struct drm_device *dev, int pipe)
723 {
724 struct drm_i915_private *dev_priv = dev->dev_private;
725
726 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
727 /* Locking is horribly broken here, but whatever. */
728 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
729 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
730
731 return intel_crtc->active;
732 } else {
733 return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
734 }
735 }
736
737 /*
738 * This timing diagram depicts the video signal in and
739 * around the vertical blanking period.
740 *
741 * Assumptions about the fictitious mode used in this example:
742 * vblank_start >= 3
743 * vsync_start = vblank_start + 1
744 * vsync_end = vblank_start + 2
745 * vtotal = vblank_start + 3
746 *
747 * start of vblank:
748 * latch double buffered registers
749 * increment frame counter (ctg+)
750 * generate start of vblank interrupt (gen4+)
751 * |
752 * | frame start:
753 * | generate frame start interrupt (aka. vblank interrupt) (gmch)
754 * | may be shifted forward 1-3 extra lines via PIPECONF
755 * | |
756 * | | start of vsync:
757 * | | generate vsync interrupt
758 * | | |
759 * ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx___ ___xxxx
760 * . \hs/ . \hs/ \hs/ \hs/ . \hs/
761 * ----va---> <-----------------vb--------------------> <--------va-------------
762 * | | <----vs-----> |
763 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
764 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
765 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
766 * | | |
767 * last visible pixel first visible pixel
768 * | increment frame counter (gen3/4)
769 * pixel counter = vblank_start * htotal pixel counter = 0 (gen3/4)
770 *
771 * x = horizontal active
772 * _ = horizontal blanking
773 * hs = horizontal sync
774 * va = vertical active
775 * vb = vertical blanking
776 * vs = vertical sync
777 * vbs = vblank_start (number)
778 *
779 * Summary:
780 * - most events happen at the start of horizontal sync
781 * - frame start happens at the start of horizontal blank, 1-4 lines
782 * (depending on PIPECONF settings) after the start of vblank
783 * - gen3/4 pixel and frame counter are synchronized with the start
784 * of horizontal active on the first line of vertical active
785 */
786
787 static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
788 {
789 /* Gen2 doesn't have a hardware frame counter */
790 return 0;
791 }
792
793 /* Called from drm generic code, passed a 'crtc', which
794 * we use as a pipe index
795 */
796 static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
797 {
798 struct drm_i915_private *dev_priv = dev->dev_private;
799 unsigned long high_frame;
800 unsigned long low_frame;
801 u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
802
803 if (!i915_pipe_enabled(dev, pipe)) {
804 DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
805 "pipe %c\n", pipe_name(pipe));
806 return 0;
807 }
808
809 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
810 struct intel_crtc *intel_crtc =
811 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
812 const struct drm_display_mode *mode =
813 &intel_crtc->config.adjusted_mode;
814
815 htotal = mode->crtc_htotal;
816 hsync_start = mode->crtc_hsync_start;
817 vbl_start = mode->crtc_vblank_start;
818 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
819 vbl_start = DIV_ROUND_UP(vbl_start, 2);
820 } else {
821 enum transcoder cpu_transcoder = (enum transcoder) pipe;
822
823 htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
824 hsync_start = (I915_READ(HSYNC(cpu_transcoder)) & 0x1fff) + 1;
825 vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
826 if ((I915_READ(PIPECONF(cpu_transcoder)) &
827 PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE)
828 vbl_start = DIV_ROUND_UP(vbl_start, 2);
829 }
830
831 /* Convert to pixel count */
832 vbl_start *= htotal;
833
834 /* Start of vblank event occurs at start of hsync */
835 vbl_start -= htotal - hsync_start;
836
837 high_frame = PIPEFRAME(pipe);
838 low_frame = PIPEFRAMEPIXEL(pipe);
839
840 /*
841 * High & low register fields aren't synchronized, so make sure
842 * we get a low value that's stable across two reads of the high
843 * register.
844 */
845 do {
846 high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
847 low = I915_READ(low_frame);
848 high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
849 } while (high1 != high2);
850
851 high1 >>= PIPE_FRAME_HIGH_SHIFT;
852 pixel = low & PIPE_PIXEL_MASK;
853 low >>= PIPE_FRAME_LOW_SHIFT;
854
855 /*
856 * The frame counter increments at beginning of active.
857 * Cook up a vblank counter by also checking the pixel
858 * counter against vblank start.
859 */
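/*
 * high1 carries the upper bits from PIPEFRAME, low the 8 frame bits that
 * share PIPEFRAMEPIXEL with the pixel counter; combined they form the
 * 24-bit value returned below.
 */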
860 return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
861 }
862
863 static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
864 {
865 struct drm_i915_private *dev_priv = dev->dev_private;
866 int reg = PIPE_FRMCOUNT_GM45(pipe);
867
868 if (!i915_pipe_enabled(dev, pipe)) {
869 DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
870 "pipe %c\n", pipe_name(pipe));
871 return 0;
872 }
873
874 return I915_READ(reg);
875 }
876
877 /* raw reads, only for fast reads of display block, no need for forcewake etc. */
878 #define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
879
880 static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
881 {
882 struct drm_device *dev = crtc->base.dev;
883 struct drm_i915_private *dev_priv = dev->dev_private;
884 const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
885 enum pipe pipe = crtc->pipe;
886 int position, vtotal;
887
888 vtotal = mode->crtc_vtotal;
889 if (mode->flags & DRM_MODE_FLAG_INTERLACE)
890 vtotal /= 2;
891
892 if (IS_GEN2(dev))
893 position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
894 else
895 position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
896
897 /*
898 * See update_scanline_offset() for the details on the
899 * scanline_offset adjustment.
900 */
901 return (position + crtc->scanline_offset) % vtotal;
902 }
903
904 static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
905 unsigned int flags, int *vpos, int *hpos,
906 ktime_t *stime, ktime_t *etime)
907 {
908 struct drm_i915_private *dev_priv = dev->dev_private;
909 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
910 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
911 const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
912 int position;
913 int vbl_start, vbl_end, hsync_start, htotal, vtotal;
914 bool in_vbl = true;
915 int ret = 0;
916 unsigned long irqflags;
917
918 if (!intel_crtc->active) {
919 DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
920 "pipe %c\n", pipe_name(pipe));
921 return 0;
922 }
923
924 htotal = mode->crtc_htotal;
925 hsync_start = mode->crtc_hsync_start;
926 vtotal = mode->crtc_vtotal;
927 vbl_start = mode->crtc_vblank_start;
928 vbl_end = mode->crtc_vblank_end;
929
930 if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
931 vbl_start = DIV_ROUND_UP(vbl_start, 2);
932 vbl_end /= 2;
933 vtotal /= 2;
934 }
935
936 ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
937
938 /*
939 * Lock uncore.lock, as we will do multiple timing critical raw
940 * register reads, potentially with preemption disabled, so the
941 * following code must not block on uncore.lock.
942 */
943 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
944
945 /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
946
947 /* Get optional system timestamp before query. */
948 if (stime)
949 *stime = ktime_get();
950
951 if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
952 /* No obvious pixelcount register. Only query vertical
953 * scanout position from Display scan line register.
954 */
955 position = __intel_get_crtc_scanline(intel_crtc);
956 } else {
957 /* Have access to pixelcount since start of frame.
958 * We can split this into vertical and horizontal
959 * scanout position.
960 */
961 position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
962
963 /* convert to pixel counts */
964 vbl_start *= htotal;
965 vbl_end *= htotal;
966 vtotal *= htotal;
967
968 /*
969 * In interlaced modes, the pixel counter counts all pixels,
970 * so one field will have htotal more pixels. In order to avoid
971 * the reported position from jumping backwards when the pixel
972 * counter is beyond the length of the shorter field, just
973 * clamp the position the length of the shorter field. This
974 * matches how the scanline counter based position works since
975 * the scanline counter doesn't count the two half lines.
976 */
977 if (position >= vtotal)
978 position = vtotal - 1;
979
980 /*
981 * Start of vblank interrupt is triggered at start of hsync,
982 * just prior to the first active line of vblank. However we
983 * consider lines to start at the leading edge of horizontal
984 * active. So, should we get here before we've crossed into
985 * the horizontal active of the first line in vblank, we would
986 * not set the DRM_SCANOUTPOS_IN_VBLANK flag. In order to fix that,
987 * always add htotal-hsync_start to the current pixel position.
988 */
989 position = (position + htotal - hsync_start) % vtotal;
990 }
991
992 /* Get optional system timestamp after query. */
993 if (etime)
994 *etime = ktime_get();
995
996 /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
997
998 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
999
1000 in_vbl = position >= vbl_start && position < vbl_end;
1001
1002 /*
1003 * While in vblank, position will be negative
1004 * counting up towards 0 at vbl_end. And outside
1005 * vblank, position will be positive counting
1006 * up from vbl_end.
1007 */
1008 if (position >= vbl_start)
1009 position -= vbl_end;
1010 else
1011 position += vtotal - vbl_end;
1012
1013 if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
1014 *vpos = position;
1015 *hpos = 0;
1016 } else {
1017 *vpos = position / htotal;
1018 *hpos = position - (*vpos * htotal);
1019 }
1020
1021 /* In vblank? */
1022 if (in_vbl)
1023 ret |= DRM_SCANOUTPOS_IN_VBLANK;
1024
1025 return ret;
1026 }
1027
1028 int intel_get_crtc_scanline(struct intel_crtc *crtc)
1029 {
1030 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
1031 unsigned long irqflags;
1032 int position;
1033
1034 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
1035 position = __intel_get_crtc_scanline(crtc);
1036 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
1037
1038 return position;
1039 }
1040
1041 static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
1042 int *max_error,
1043 struct timeval *vblank_time,
1044 unsigned flags)
1045 {
1046 struct drm_crtc *crtc;
1047
1048 if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
1049 DRM_ERROR("Invalid crtc %d\n", pipe);
1050 return -EINVAL;
1051 }
1052
1053 /* Get drm_crtc to timestamp: */
1054 crtc = intel_get_crtc_for_pipe(dev, pipe);
1055 if (crtc == NULL) {
1056 DRM_ERROR("Invalid crtc %d\n", pipe);
1057 return -EINVAL;
1058 }
1059
1060 if (!crtc->enabled) {
1061 DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
1062 return -EBUSY;
1063 }
1064
1065 /* Helper routine in DRM core does all the work: */
1066 return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
1067 vblank_time, flags,
1068 crtc,
1069 &to_intel_crtc(crtc)->config.adjusted_mode);
1070 }
1071
1072 static bool intel_hpd_irq_event(struct drm_device *dev,
1073 struct drm_connector *connector)
1074 {
1075 enum drm_connector_status old_status;
1076
1077 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
1078 old_status = connector->status;
1079
1080 connector->status = connector->funcs->detect(connector, false);
1081 if (old_status == connector->status)
1082 return false;
1083
1084 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
1085 connector->base.id,
1086 connector->name,
1087 drm_get_connector_status_name(old_status),
1088 drm_get_connector_status_name(connector->status));
1089
1090 return true;
1091 }
1092
1093 static void i915_digport_work_func(struct work_struct *work)
1094 {
1095 struct drm_i915_private *dev_priv =
1096 container_of(work, struct drm_i915_private, dig_port_work);
1097 u32 long_port_mask, short_port_mask;
1098 struct intel_digital_port *intel_dig_port;
1099 int i, ret;
1100 u32 old_bits = 0;
1101
1102 spin_lock_irq(&dev_priv->irq_lock);
1103 long_port_mask = dev_priv->long_hpd_port_mask;
1104 dev_priv->long_hpd_port_mask = 0;
1105 short_port_mask = dev_priv->short_hpd_port_mask;
1106 dev_priv->short_hpd_port_mask = 0;
1107 spin_unlock_irq(&dev_priv->irq_lock);
1108
1109 for (i = 0; i < I915_MAX_PORTS; i++) {
1110 bool valid = false;
1111 bool long_hpd = false;
1112 intel_dig_port = dev_priv->hpd_irq_port[i];
1113 if (!intel_dig_port || !intel_dig_port->hpd_pulse)
1114 continue;
1115
1116 if (long_port_mask & (1 << i)) {
1117 valid = true;
1118 long_hpd = true;
1119 } else if (short_port_mask & (1 << i))
1120 valid = true;
1121
1122 if (valid) {
1123 ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
1124 if (ret == true) {
1125 /* if hpd_pulse returns true, fall back to old school hpd handling */
1126 old_bits |= (1 << intel_dig_port->base.hpd_pin);
1127 }
1128 }
1129 }
1130
1131 if (old_bits) {
1132 spin_lock_irq(&dev_priv->irq_lock);
1133 dev_priv->hpd_event_bits |= old_bits;
1134 spin_unlock_irq(&dev_priv->irq_lock);
1135 schedule_work(&dev_priv->hotplug_work);
1136 }
1137 }
1138
1139 /*
1140 * Handle hotplug events outside the interrupt handler proper.
1141 */
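/* Delay (in msec) before hotplug detection is re-enabled on a pin that was
 * shut off after an interrupt storm, via the hotplug re-enable delayed work. */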
1142 #define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)
1143
1144 static void i915_hotplug_work_func(struct work_struct *work)
1145 {
1146 struct drm_i915_private *dev_priv =
1147 container_of(work, struct drm_i915_private, hotplug_work);
1148 struct drm_device *dev = dev_priv->dev;
1149 struct drm_mode_config *mode_config = &dev->mode_config;
1150 struct intel_connector *intel_connector;
1151 struct intel_encoder *intel_encoder;
1152 struct drm_connector *connector;
1153 bool hpd_disabled = false;
1154 bool changed = false;
1155 u32 hpd_event_bits;
1156
1157 mutex_lock(&mode_config->mutex);
1158 DRM_DEBUG_KMS("running encoder hotplug functions\n");
1159
1160 spin_lock_irq(&dev_priv->irq_lock);
1161
1162 hpd_event_bits = dev_priv->hpd_event_bits;
1163 dev_priv->hpd_event_bits = 0;
1164 list_for_each_entry(connector, &mode_config->connector_list, head) {
1165 intel_connector = to_intel_connector(connector);
1166 if (!intel_connector->encoder)
1167 continue;
1168 intel_encoder = intel_connector->encoder;
1169 if (intel_encoder->hpd_pin > HPD_NONE &&
1170 dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
1171 connector->polled == DRM_CONNECTOR_POLL_HPD) {
1172 DRM_INFO("HPD interrupt storm detected on connector %s: "
1173 "switching from hotplug detection to polling\n",
1174 connector->name);
1175 dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
1176 connector->polled = DRM_CONNECTOR_POLL_CONNECT
1177 | DRM_CONNECTOR_POLL_DISCONNECT;
1178 hpd_disabled = true;
1179 }
1180 if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
1181 DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
1182 connector->name, intel_encoder->hpd_pin);
1183 }
1184 }
1185 /* If there were no outputs to poll, polling was disabled, so make
1186 * sure it gets re-enabled now that we are disabling HPD on some
1187 * connectors. */
1188 if (hpd_disabled) {
1189 drm_kms_helper_poll_enable(dev);
1190 mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work,
1191 msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
1192 }
1193
1194 spin_unlock_irq(&dev_priv->irq_lock);
1195
1196 list_for_each_entry(connector, &mode_config->connector_list, head) {
1197 intel_connector = to_intel_connector(connector);
1198 if (!intel_connector->encoder)
1199 continue;
1200 intel_encoder = intel_connector->encoder;
1201 if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
1202 if (intel_encoder->hot_plug)
1203 intel_encoder->hot_plug(intel_encoder);
1204 if (intel_hpd_irq_event(dev, connector))
1205 changed = true;
1206 }
1207 }
1208 mutex_unlock(&mode_config->mutex);
1209
1210 if (changed)
1211 drm_kms_helper_hotplug_event(dev);
1212 }
1213
1214 static void ironlake_rps_change_irq_handler(struct drm_device *dev)
1215 {
1216 struct drm_i915_private *dev_priv = dev->dev_private;
1217 u32 busy_up, busy_down, max_avg, min_avg;
1218 u8 new_delay;
1219
1220 spin_lock(&mchdev_lock);
1221
1222 I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
1223
1224 new_delay = dev_priv->ips.cur_delay;
1225
1226 I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
1227 busy_up = I915_READ(RCPREVBSYTUPAVG);
1228 busy_down = I915_READ(RCPREVBSYTDNAVG);
1229 max_avg = I915_READ(RCBMAXAVG);
1230 min_avg = I915_READ(RCBMINAVG);
1231
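/*
 * Note that the ILK delay value is inverted: a smaller delay means a
 * higher frequency, so max_delay is numerically the smallest value the
 * clamping below allows.
 */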
1232 /* Handle RCS change request from hw */
1233 if (busy_up > max_avg) {
1234 if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
1235 new_delay = dev_priv->ips.cur_delay - 1;
1236 if (new_delay < dev_priv->ips.max_delay)
1237 new_delay = dev_priv->ips.max_delay;
1238 } else if (busy_down < min_avg) {
1239 if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
1240 new_delay = dev_priv->ips.cur_delay + 1;
1241 if (new_delay > dev_priv->ips.min_delay)
1242 new_delay = dev_priv->ips.min_delay;
1243 }
1244
1245 if (ironlake_set_drps(dev, new_delay))
1246 dev_priv->ips.cur_delay = new_delay;
1247
1248 spin_unlock(&mchdev_lock);
1249
1250 return;
1251 }
1252
1253 static void notify_ring(struct drm_device *dev,
1254 struct intel_engine_cs *ring)
1255 {
1256 if (!intel_ring_initialized(ring))
1257 return;
1258
1259 trace_i915_gem_request_complete(ring);
1260
1261 if (drm_core_check_feature(dev, DRIVER_MODESET))
1262 intel_notify_mmio_flip(ring);
1263
1264 wake_up_all(&ring->irq_queue);
1265 i915_queue_hangcheck(dev);
1266 }
1267
1268 static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
1269 struct intel_rps_ei *rps_ei)
1270 {
1271 u32 cz_ts, cz_freq_khz;
1272 u32 render_count, media_count;
1273 u32 elapsed_render, elapsed_media, elapsed_time;
1274 u32 residency = 0;
1275
1276 cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
1277 cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4);
1278
1279 render_count = I915_READ(VLV_RENDER_C0_COUNT_REG);
1280 media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG);
1281
1282 if (rps_ei->cz_clock == 0) {
1283 rps_ei->cz_clock = cz_ts;
1284 rps_ei->render_c0 = render_count;
1285 rps_ei->media_c0 = media_count;
1286
1287 return dev_priv->rps.cur_freq;
1288 }
1289
1290 elapsed_time = cz_ts - rps_ei->cz_clock;
1291 rps_ei->cz_clock = cz_ts;
1292
1293 elapsed_render = render_count - rps_ei->render_c0;
1294 rps_ei->render_c0 = render_count;
1295
1296 elapsed_media = media_count - rps_ei->media_c0;
1297 rps_ei->media_c0 = media_count;
1298
1299 /* Convert all the counters into a common unit of milliseconds */
1300 elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC;
1301 elapsed_render /= cz_freq_khz;
1302 elapsed_media /= cz_freq_khz;
1303
1304 /*
1305 * Calculate overall C0 residency percentage
1306 * only if the elapsed time is non-zero
1307 */
1308 if (elapsed_time) {
1309 residency =
1310 ((max(elapsed_render, elapsed_media) * 100)
1311 / elapsed_time);
1312 }
1313
1314 return residency;
1315 }
1316
1317 /**
1318 * vlv_calc_delay_from_C0_counters - Increase/Decrease freq based on GPU
1319 * busy-ness calculated from C0 counters of render & media power wells
1320 * @dev_priv: DRM device private
1321 *
1322 */
1323 static int vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
1324 {
1325 u32 residency_C0_up = 0, residency_C0_down = 0;
1326 int new_delay, adj;
1327
1328 dev_priv->rps.ei_interrupt_count++;
1329
1330 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
1331
1332
1333 if (dev_priv->rps.up_ei.cz_clock == 0) {
1334 vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei);
1335 vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei);
1336 return dev_priv->rps.cur_freq;
1337 }
1338
1339
1340 /*
1341 * To throttle down, C0 residency should be less than the down threshold
1342 * for continuous EI intervals. So calculate the down EI counters
1343 * once every VLV_INT_COUNT_FOR_DOWN_EI interrupts
1344 */
1345 if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) {
1346
1347 dev_priv->rps.ei_interrupt_count = 0;
1348
1349 residency_C0_down = vlv_c0_residency(dev_priv,
1350 &dev_priv->rps.down_ei);
1351 } else {
1352 residency_C0_up = vlv_c0_residency(dev_priv,
1353 &dev_priv->rps.up_ei);
1354 }
1355
1356 new_delay = dev_priv->rps.cur_freq;
1357
1358 adj = dev_priv->rps.last_adj;
1359 /* C0 residency is greater than UP threshold. Increase Frequency */
1360 if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) {
1361 if (adj > 0)
1362 adj *= 2;
1363 else
1364 adj = 1;
1365
1366 if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)
1367 new_delay = dev_priv->rps.cur_freq + adj;
1368
1369 /*
1370 * For better performance, jump directly
1371 * to RPe if we're below it.
1372 */
1373 if (new_delay < dev_priv->rps.efficient_freq)
1374 new_delay = dev_priv->rps.efficient_freq;
1375
1376 } else if (!dev_priv->rps.ei_interrupt_count &&
1377 (residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) {
1378 if (adj < 0)
1379 adj *= 2;
1380 else
1381 adj = -1;
1382 /*
1383 * This means C0 residency is less than the down threshold over
1384 * a period of VLV_INT_COUNT_FOR_DOWN_EI interrupts, so reduce the freq
1385 */
1386 if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
1387 new_delay = dev_priv->rps.cur_freq + adj;
1388 }
1389
1390 return new_delay;
1391 }
1392
1393 static void gen6_pm_rps_work(struct work_struct *work)
1394 {
1395 struct drm_i915_private *dev_priv =
1396 container_of(work, struct drm_i915_private, rps.work);
1397 u32 pm_iir;
1398 int new_delay, adj;
1399
1400 spin_lock_irq(&dev_priv->irq_lock);
1401 pm_iir = dev_priv->rps.pm_iir;
1402 dev_priv->rps.pm_iir = 0;
1403 if (INTEL_INFO(dev_priv->dev)->gen >= 8)
1404 gen8_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
1405 else {
1406 /* Make sure not to corrupt PMIMR state used by ringbuffer */
1407 gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
1408 }
1409 spin_unlock_irq(&dev_priv->irq_lock);
1410
1411 /* Make sure we didn't queue anything we're not going to process. */
1412 WARN_ON(pm_iir & ~dev_priv->pm_rps_events);
1413
1414 if ((pm_iir & dev_priv->pm_rps_events) == 0)
1415 return;
1416
1417 mutex_lock(&dev_priv->rps.hw_lock);
1418
1419 adj = dev_priv->rps.last_adj;
1420 if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
1421 if (adj > 0)
1422 adj *= 2;
1423 else {
1424 /* CHV needs even encode values */
1425 adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1;
1426 }
1427 new_delay = dev_priv->rps.cur_freq + adj;
1428
1429 /*
1430 * For better performance, jump directly
1431 * to RPe if we're below it.
1432 */
1433 if (new_delay < dev_priv->rps.efficient_freq)
1434 new_delay = dev_priv->rps.efficient_freq;
1435 } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
1436 if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
1437 new_delay = dev_priv->rps.efficient_freq;
1438 else
1439 new_delay = dev_priv->rps.min_freq_softlimit;
1440 adj = 0;
1441 } else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
1442 new_delay = vlv_calc_delay_from_C0_counters(dev_priv);
1443 } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
1444 if (adj < 0)
1445 adj *= 2;
1446 else {
1447 /* CHV needs even encode values */
1448 adj = IS_CHERRYVIEW(dev_priv->dev) ? -2 : -1;
1449 }
1450 new_delay = dev_priv->rps.cur_freq + adj;
1451 } else { /* unknown event */
1452 new_delay = dev_priv->rps.cur_freq;
1453 }
1454
1455 /* sysfs frequency interfaces may have snuck in while servicing the
1456 * interrupt
1457 */
1458 new_delay = clamp_t(int, new_delay,
1459 dev_priv->rps.min_freq_softlimit,
1460 dev_priv->rps.max_freq_softlimit);
1461
1462 dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;
1463
1464 if (IS_VALLEYVIEW(dev_priv->dev))
1465 valleyview_set_rps(dev_priv->dev, new_delay);
1466 else
1467 gen6_set_rps(dev_priv->dev, new_delay);
1468
1469 mutex_unlock(&dev_priv->rps.hw_lock);
1470 }
1471
1472
1473 /**
1474 * ivybridge_parity_work - Workqueue called when a parity error interrupt
1475 * occurred.
1476 * @work: workqueue struct
1477 *
1478 * Doesn't actually do anything except notify userspace. As a consequence of
1479 * this event, userspace should try to remap the bad rows, since
1480 * statistically the same row is more likely to go bad again.
1481 */
1482 static void ivybridge_parity_work(struct work_struct *work)
1483 {
1484 struct drm_i915_private *dev_priv =
1485 container_of(work, struct drm_i915_private, l3_parity.error_work);
1486 u32 error_status, row, bank, subbank;
1487 char *parity_event[6];
1488 uint32_t misccpctl;
1489 uint8_t slice = 0;
1490
1491 /* We must turn off DOP level clock gating to access the L3 registers.
1492 * In order to prevent a get/put style interface, acquire struct mutex
1493 * any time we access those registers.
1494 */
1495 mutex_lock(&dev_priv->dev->struct_mutex);
1496
1497 /* If we've screwed up tracking, just let the interrupt fire again */
1498 if (WARN_ON(!dev_priv->l3_parity.which_slice))
1499 goto out;
1500
1501 misccpctl = I915_READ(GEN7_MISCCPCTL);
1502 I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
1503 POSTING_READ(GEN7_MISCCPCTL);
1504
1505 while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
1506 u32 reg;
1507
1508 slice--;
1509 if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
1510 break;
1511
1512 dev_priv->l3_parity.which_slice &= ~(1<<slice);
1513
1514 reg = GEN7_L3CDERRST1 + (slice * 0x200);
1515
1516 error_status = I915_READ(reg);
1517 row = GEN7_PARITY_ERROR_ROW(error_status);
1518 bank = GEN7_PARITY_ERROR_BANK(error_status);
1519 subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);
1520
1521 I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
1522 POSTING_READ(reg);
1523
1524 parity_event[0] = I915_L3_PARITY_UEVENT "=1";
1525 parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
1526 parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
1527 parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
1528 parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
1529 parity_event[5] = NULL;
1530
1531 kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
1532 KOBJ_CHANGE, parity_event);
1533
1534 DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
1535 slice, row, bank, subbank);
1536
1537 kfree(parity_event[4]);
1538 kfree(parity_event[3]);
1539 kfree(parity_event[2]);
1540 kfree(parity_event[1]);
1541 }
1542
1543 I915_WRITE(GEN7_MISCCPCTL, misccpctl);
1544
1545 out:
1546 WARN_ON(dev_priv->l3_parity.which_slice);
1547 spin_lock_irq(&dev_priv->irq_lock);
1548 gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
1549 spin_unlock_irq(&dev_priv->irq_lock);
1550
1551 mutex_unlock(&dev_priv->dev->struct_mutex);
1552 }
1553
1554 static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
1555 {
1556 struct drm_i915_private *dev_priv = dev->dev_private;
1557
1558 if (!HAS_L3_DPF(dev))
1559 return;
1560
1561 spin_lock(&dev_priv->irq_lock);
1562 gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
1563 spin_unlock(&dev_priv->irq_lock);
1564
1565 iir &= GT_PARITY_ERROR(dev);
1566 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
1567 dev_priv->l3_parity.which_slice |= 1 << 1;
1568
1569 if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
1570 dev_priv->l3_parity.which_slice |= 1 << 0;
1571
1572 queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
1573 }
1574
1575 static void ilk_gt_irq_handler(struct drm_device *dev,
1576 struct drm_i915_private *dev_priv,
1577 u32 gt_iir)
1578 {
1579 if (gt_iir &
1580 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1581 notify_ring(dev, &dev_priv->ring[RCS]);
1582 if (gt_iir & ILK_BSD_USER_INTERRUPT)
1583 notify_ring(dev, &dev_priv->ring[VCS]);
1584 }
1585
1586 static void snb_gt_irq_handler(struct drm_device *dev,
1587 struct drm_i915_private *dev_priv,
1588 u32 gt_iir)
1589 {
1590
1591 if (gt_iir &
1592 (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
1593 notify_ring(dev, &dev_priv->ring[RCS]);
1594 if (gt_iir & GT_BSD_USER_INTERRUPT)
1595 notify_ring(dev, &dev_priv->ring[VCS]);
1596 if (gt_iir & GT_BLT_USER_INTERRUPT)
1597 notify_ring(dev, &dev_priv->ring[BCS]);
1598
1599 if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
1600 GT_BSD_CS_ERROR_INTERRUPT |
1601 GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
1602 i915_handle_error(dev, false, "GT error interrupt 0x%08x",
1603 gt_iir);
1604 }
1605
1606 if (gt_iir & GT_PARITY_ERROR(dev))
1607 ivybridge_parity_error_irq_handler(dev, gt_iir);
1608 }
1609
1610 static void gen8_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
1611 {
1612 if ((pm_iir & dev_priv->pm_rps_events) == 0)
1613 return;
1614
1615 spin_lock(&dev_priv->irq_lock);
1616 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
1617 gen8_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
1618 spin_unlock(&dev_priv->irq_lock);
1619
1620 queue_work(dev_priv->wq, &dev_priv->rps.work);
1621 }
1622
1623 static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
1624 struct drm_i915_private *dev_priv,
1625 u32 master_ctl)
1626 {
1627 struct intel_engine_cs *ring;
1628 u32 rcs, bcs, vcs;
1629 uint32_t tmp = 0;
1630 irqreturn_t ret = IRQ_NONE;
1631
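/*
 * The GT interrupts are split over four IIR banks: 0 = RCS/BCS,
 * 1 = VCS1/VCS2, 2 = PM (RPS), 3 = VECS. A bank is only read and acked
 * when its bit in the master control register is set.
 */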
1632 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
1633 tmp = I915_READ(GEN8_GT_IIR(0));
1634 if (tmp) {
1635 I915_WRITE(GEN8_GT_IIR(0), tmp);
1636 ret = IRQ_HANDLED;
1637
1638 rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
1639 ring = &dev_priv->ring[RCS];
1640 if (rcs & GT_RENDER_USER_INTERRUPT)
1641 notify_ring(dev, ring);
1642 if (rcs & GT_CONTEXT_SWITCH_INTERRUPT)
1643 intel_execlists_handle_ctx_events(ring);
1644
1645 bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
1646 ring = &dev_priv->ring[BCS];
1647 if (bcs & GT_RENDER_USER_INTERRUPT)
1648 notify_ring(dev, ring);
1649 if (bcs & GT_CONTEXT_SWITCH_INTERRUPT)
1650 intel_execlists_handle_ctx_events(ring);
1651 } else
1652 DRM_ERROR("The master control interrupt lied (GT0)!\n");
1653 }
1654
1655 if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
1656 tmp = I915_READ(GEN8_GT_IIR(1));
1657 if (tmp) {
1658 I915_WRITE(GEN8_GT_IIR(1), tmp);
1659 ret = IRQ_HANDLED;
1660
1661 vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
1662 ring = &dev_priv->ring[VCS];
1663 if (vcs & GT_RENDER_USER_INTERRUPT)
1664 notify_ring(dev, ring);
1665 if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
1666 intel_execlists_handle_ctx_events(ring);
1667
1668 vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
1669 ring = &dev_priv->ring[VCS2];
1670 if (vcs & GT_RENDER_USER_INTERRUPT)
1671 notify_ring(dev, ring);
1672 if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
1673 intel_execlists_handle_ctx_events(ring);
1674 } else
1675 DRM_ERROR("The master control interrupt lied (GT1)!\n");
1676 }
1677
1678 if (master_ctl & GEN8_GT_PM_IRQ) {
1679 tmp = I915_READ(GEN8_GT_IIR(2));
1680 if (tmp & dev_priv->pm_rps_events) {
1681 I915_WRITE(GEN8_GT_IIR(2),
1682 tmp & dev_priv->pm_rps_events);
1683 ret = IRQ_HANDLED;
1684 gen8_rps_irq_handler(dev_priv, tmp);
1685 } else
1686 DRM_ERROR("The master control interrupt lied (PM)!\n");
1687 }
1688
1689 if (master_ctl & GEN8_GT_VECS_IRQ) {
1690 tmp = I915_READ(GEN8_GT_IIR(3));
1691 if (tmp) {
1692 I915_WRITE(GEN8_GT_IIR(3), tmp);
1693 ret = IRQ_HANDLED;
1694
1695 vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
1696 ring = &dev_priv->ring[VECS];
1697 if (vcs & GT_RENDER_USER_INTERRUPT)
1698 notify_ring(dev, ring);
1699 if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
1700 intel_execlists_handle_ctx_events(ring);
1701 } else
1702 DRM_ERROR("The master control interrupt lied (GT3)!\n");
1703 }
1704
1705 return ret;
1706 }
1707
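/*
 * If a pin triggers more than HPD_STORM_THRESHOLD interrupts within
 * HPD_STORM_DETECT_PERIOD msec it is treated as an interrupt storm and
 * the hotplug work switches that connector over to polling.
 */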
1708 #define HPD_STORM_DETECT_PERIOD 1000
1709 #define HPD_STORM_THRESHOLD 5
1710
1711 static int ilk_port_to_hotplug_shift(enum port port)
1712 {
1713 switch (port) {
1714 case PORT_A:
1715 case PORT_E:
1716 default:
1717 return -1;
1718 case PORT_B:
1719 return 0;
1720 case PORT_C:
1721 return 8;
1722 case PORT_D:
1723 return 16;
1724 }
1725 }
1726
1727 static int g4x_port_to_hotplug_shift(enum port port)
1728 {
1729 switch (port) {
1730 case PORT_A:
1731 case PORT_E:
1732 default:
1733 return -1;
1734 case PORT_B:
1735 return 17;
1736 case PORT_C:
1737 return 19;
1738 case PORT_D:
1739 return 21;
1740 }
1741 }
1742
1743 static inline enum port get_port_from_pin(enum hpd_pin pin)
1744 {
1745 switch (pin) {
1746 case HPD_PORT_B:
1747 return PORT_B;
1748 case HPD_PORT_C:
1749 return PORT_C;
1750 case HPD_PORT_D:
1751 return PORT_D;
1752 default:
1753 return PORT_A; /* no hpd */
1754 }
1755 }
1756
1757 static inline void intel_hpd_irq_handler(struct drm_device *dev,
1758 u32 hotplug_trigger,
1759 u32 dig_hotplug_reg,
1760 const u32 *hpd)
1761 {
1762 struct drm_i915_private *dev_priv = dev->dev_private;
1763 int i;
1764 enum port port;
1765 bool storm_detected = false;
1766 bool queue_dig = false, queue_hp = false;
1767 u32 dig_shift;
1768 u32 dig_port_mask = 0;
1769
1770 if (!hotplug_trigger)
1771 return;
1772
1773 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
1774 hotplug_trigger, dig_hotplug_reg);
1775
1776 spin_lock(&dev_priv->irq_lock);
1777 for (i = 1; i < HPD_NUM_PINS; i++) {
1778 if (!(hpd[i] & hotplug_trigger))
1779 continue;
1780
1781 port = get_port_from_pin(i);
1782 if (port && dev_priv->hpd_irq_port[port]) {
1783 bool long_hpd;
1784
1785 if (IS_G4X(dev)) {
1786 dig_shift = g4x_port_to_hotplug_shift(port);
1787 long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
1788 } else {
1789 dig_shift = ilk_port_to_hotplug_shift(port);
1790 long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
1791 }
1792
1793 DRM_DEBUG_DRIVER("digital hpd port %c - %s\n",
1794 port_name(port),
1795 long_hpd ? "long" : "short");
1796 /* for long HPD pulses we want to have the digital queue happen,
1797 but we still want HPD storm detection to function. */
1798 if (long_hpd) {
1799 dev_priv->long_hpd_port_mask |= (1 << port);
1800 dig_port_mask |= hpd[i];
1801 } else {
1802 /* for short HPD just trigger the digital queue */
1803 dev_priv->short_hpd_port_mask |= (1 << port);
1804 hotplug_trigger &= ~hpd[i];
1805 }
1806 queue_dig = true;
1807 }
1808 }
1809
1810 for (i = 1; i < HPD_NUM_PINS; i++) {
1811 if (hpd[i] & hotplug_trigger &&
1812 dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
1813 /*
1814 * On GMCH platforms the interrupt mask bits only
1815 * prevent irq generation, not the setting of the
1816 * hotplug bits itself. So only WARN about unexpected
1817 * interrupts on saner platforms.
1818 */
1819 WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
1820 "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
1821 hotplug_trigger, i, hpd[i]);
1822
1823 continue;
1824 }
1825
1826 if (!(hpd[i] & hotplug_trigger) ||
1827 dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
1828 continue;
1829
1830 if (!(dig_port_mask & hpd[i])) {
1831 dev_priv->hpd_event_bits |= (1 << i);
1832 queue_hp = true;
1833 }
1834
1835 if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
1836 dev_priv->hpd_stats[i].hpd_last_jiffies
1837 + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
1838 dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
1839 dev_priv->hpd_stats[i].hpd_cnt = 0;
1840 DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
1841 } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
1842 dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
1843 dev_priv->hpd_event_bits &= ~(1 << i);
1844 DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
1845 storm_detected = true;
1846 } else {
1847 dev_priv->hpd_stats[i].hpd_cnt++;
1848 DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
1849 dev_priv->hpd_stats[i].hpd_cnt);
1850 }
1851 }
1852
1853 if (storm_detected)
1854 dev_priv->display.hpd_irq_setup(dev);
1855 spin_unlock(&dev_priv->irq_lock);
1856
1857 /*
1858 * Our hotplug handler can grab modeset locks (by calling down into the
1859 * fb helpers). Hence it must not be run on our own dev_priv->wq work
1860 * queue for otherwise the flush_work in the pageflip code will
1861 * deadlock.
1862 */
1863 if (queue_dig)
1864 queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work);
1865 if (queue_hp)
1866 schedule_work(&dev_priv->hotplug_work);
1867 }
1868
1869 static void gmbus_irq_handler(struct drm_device *dev)
1870 {
1871 struct drm_i915_private *dev_priv = dev->dev_private;
1872
1873 wake_up_all(&dev_priv->gmbus_wait_queue);
1874 }
1875
1876 static void dp_aux_irq_handler(struct drm_device *dev)
1877 {
1878 struct drm_i915_private *dev_priv = dev->dev_private;
1879
1880 wake_up_all(&dev_priv->gmbus_wait_queue);
1881 }
1882
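/*
 * Pipe CRC interrupts feed the debugfs CRC capture machinery: each event
 * appends one entry to the per-pipe circular buffer and wakes readers
 * sleeping on pipe_crc->wq. With CONFIG_DEBUG_FS disabled the handler
 * compiles away to a no-op.
 */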
1883 #if defined(CONFIG_DEBUG_FS)
1884 static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1885 uint32_t crc0, uint32_t crc1,
1886 uint32_t crc2, uint32_t crc3,
1887 uint32_t crc4)
1888 {
1889 struct drm_i915_private *dev_priv = dev->dev_private;
1890 struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
1891 struct intel_pipe_crc_entry *entry;
1892 int head, tail;
1893
1894 spin_lock(&pipe_crc->lock);
1895
1896 if (!pipe_crc->entries) {
1897 spin_unlock(&pipe_crc->lock);
1898 DRM_ERROR("spurious interrupt\n");
1899 return;
1900 }
1901
1902 head = pipe_crc->head;
1903 tail = pipe_crc->tail;
1904
1905 if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
1906 spin_unlock(&pipe_crc->lock);
1907 DRM_ERROR("CRC buffer overflowing\n");
1908 return;
1909 }
1910
1911 entry = &pipe_crc->entries[head];
1912
1913 entry->frame = dev->driver->get_vblank_counter(dev, pipe);
1914 entry->crc[0] = crc0;
1915 entry->crc[1] = crc1;
1916 entry->crc[2] = crc2;
1917 entry->crc[3] = crc3;
1918 entry->crc[4] = crc4;
1919
1920 head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
1921 pipe_crc->head = head;
1922
1923 spin_unlock(&pipe_crc->lock);
1924
1925 wake_up_interruptible(&pipe_crc->wq);
1926 }
1927 #else
1928 static inline void
1929 display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
1930 uint32_t crc0, uint32_t crc1,
1931 uint32_t crc2, uint32_t crc3,
1932 uint32_t crc4) {}
1933 #endif
1934
1935
1936 static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1937 {
1938 struct drm_i915_private *dev_priv = dev->dev_private;
1939
1940 display_pipe_crc_irq_handler(dev, pipe,
1941 I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1942 0, 0, 0, 0);
1943 }
1944
1945 static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1946 {
1947 struct drm_i915_private *dev_priv = dev->dev_private;
1948
1949 display_pipe_crc_irq_handler(dev, pipe,
1950 I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
1951 I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
1952 I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
1953 I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
1954 I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
1955 }
1956
1957 static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
1958 {
1959 struct drm_i915_private *dev_priv = dev->dev_private;
1960 uint32_t res1, res2;
1961
1962 if (INTEL_INFO(dev)->gen >= 3)
1963 res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
1964 else
1965 res1 = 0;
1966
1967 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
1968 res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
1969 else
1970 res2 = 0;
1971
1972 display_pipe_crc_irq_handler(dev, pipe,
1973 I915_READ(PIPE_CRC_RES_RED(pipe)),
1974 I915_READ(PIPE_CRC_RES_GREEN(pipe)),
1975 I915_READ(PIPE_CRC_RES_BLUE(pipe)),
1976 res1, res2);
1977 }
1978
1979 void gen8_flip_interrupt(struct drm_device *dev)
1980 {
1981 struct drm_i915_private *dev_priv = dev->dev_private;
1982
1983 if (!dev_priv->rps.is_bdw_sw_turbo)
1984 return;
1985
1986 if (atomic_read(&dev_priv->rps.sw_turbo.flip_received)) {
1987 mod_timer(&dev_priv->rps.sw_turbo.flip_timer,
1988 usecs_to_jiffies(dev_priv->rps.sw_turbo.timeout) + jiffies);
1989 }
1990 else {
1991 dev_priv->rps.sw_turbo.flip_timer.expires =
1992 usecs_to_jiffies(dev_priv->rps.sw_turbo.timeout) + jiffies;
1993 add_timer(&dev_priv->rps.sw_turbo.flip_timer);
1994 atomic_set(&dev_priv->rps.sw_turbo.flip_received, true);
1995 }
1996
1997 bdw_software_turbo(dev);
1998 }
1999
2000 /* The RPS events need forcewake, so we add them to a work queue and mask their
2001 * IMR bits until the work is done. Other interrupts can be processed without
2002 * the work queue. */
2003 static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
2004 {
2005 if (pm_iir & dev_priv->pm_rps_events) {
2006 spin_lock(&dev_priv->irq_lock);
2007 dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
2008 gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
2009 spin_unlock(&dev_priv->irq_lock);
2010
2011 queue_work(dev_priv->wq, &dev_priv->rps.work);
2012 }
2013
2014 if (HAS_VEBOX(dev_priv->dev)) {
2015 if (pm_iir & PM_VEBOX_USER_INTERRUPT)
2016 notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
2017
2018 if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
2019 i915_handle_error(dev_priv->dev, false,
2020 "VEBOX CS error interrupt 0x%08x",
2021 pm_iir);
2022 }
2023 }
2024 }
2025
2026 static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
2027 {
2028 if (!drm_handle_vblank(dev, pipe))
2029 return false;
2030
2031 return true;
2032 }
2033
2034 static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
2035 {
2036 struct drm_i915_private *dev_priv = dev->dev_private;
2037 u32 pipe_stats[I915_MAX_PIPES] = { };
2038 int pipe;
2039
2040 spin_lock(&dev_priv->irq_lock);
2041 for_each_pipe(dev_priv, pipe) {
2042 int reg;
2043 u32 mask, iir_bit = 0;
2044
2045 /*
2046 * PIPESTAT bits get signalled even when the interrupt is
2047 * disabled with the mask bits, and some of the status bits do
2048 * not generate interrupts at all (like the underrun bit). Hence
2049 * we need to be careful that we only handle what we want to
2050 * handle.
2051 */
2052 mask = 0;
2053 if (__cpu_fifo_underrun_reporting_enabled(dev, pipe))
2054 mask |= PIPE_FIFO_UNDERRUN_STATUS;
2055
2056 switch (pipe) {
2057 case PIPE_A:
2058 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
2059 break;
2060 case PIPE_B:
2061 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
2062 break;
2063 case PIPE_C:
2064 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
2065 break;
2066 }
2067 if (iir & iir_bit)
2068 mask |= dev_priv->pipestat_irq_mask[pipe];
2069
2070 if (!mask)
2071 continue;
2072
2073 reg = PIPESTAT(pipe);
2074 mask |= PIPESTAT_INT_ENABLE_MASK;
2075 pipe_stats[pipe] = I915_READ(reg) & mask;
2076
2077 /*
2078 * Clear the PIPE*STAT regs before the IIR
2079 */
2080 if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
2081 PIPESTAT_INT_STATUS_MASK))
2082 I915_WRITE(reg, pipe_stats[pipe]);
2083 }
2084 spin_unlock(&dev_priv->irq_lock);
2085
2086 for_each_pipe(dev_priv, pipe) {
2087 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
2088 intel_pipe_handle_vblank(dev, pipe))
2089 intel_check_page_flip(dev, pipe);
2090
2091 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
2092 intel_prepare_page_flip(dev, pipe);
2093 intel_finish_page_flip(dev, pipe);
2094 }
2095
2096 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
2097 i9xx_pipe_crc_irq_handler(dev, pipe);
2098
2099 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
2100 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
2101 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
2102 }
2103
2104 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
2105 gmbus_irq_handler(dev);
2106 }
2107
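/*
 * i9xx_hpd_irq_handler - handle PORT_HOTPLUG_STAT based hotplug (gen2-4, VLV)
 *
 * The status register is cleared (and the write posted) before the caller
 * clears IIR so that back-to-back hotplug events are not lost; the set bits
 * are then fed to intel_hpd_irq_handler() with the platform's status table.
 */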
2108 static void i9xx_hpd_irq_handler(struct drm_device *dev)
2109 {
2110 struct drm_i915_private *dev_priv = dev->dev_private;
2111 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
2112
2113 if (hotplug_status) {
2114 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
2115 /*
2116 * Make sure hotplug status is cleared before we clear IIR, or else we
2117 * may miss hotplug events.
2118 */
2119 POSTING_READ(PORT_HOTPLUG_STAT);
2120
2121 if (IS_G4X(dev)) {
2122 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
2123
2124 intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x);
2125 } else {
2126 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
2127
2128 intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915);
2129 }
2130
2131 if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
2132 hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
2133 dp_aux_irq_handler(dev);
2134 }
2135 }
2136
2137 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
2138 {
2139 struct drm_device *dev = arg;
2140 struct drm_i915_private *dev_priv = dev->dev_private;
2141 u32 iir, gt_iir, pm_iir;
2142 irqreturn_t ret = IRQ_NONE;
2143
2144 while (true) {
2145 /* Find, clear, then process each source of interrupt */
2146
2147 gt_iir = I915_READ(GTIIR);
2148 if (gt_iir)
2149 I915_WRITE(GTIIR, gt_iir);
2150
2151 pm_iir = I915_READ(GEN6_PMIIR);
2152 if (pm_iir)
2153 I915_WRITE(GEN6_PMIIR, pm_iir);
2154
2155 iir = I915_READ(VLV_IIR);
2156 if (iir) {
2157 /* Consume port before clearing IIR or we'll miss events */
2158 if (iir & I915_DISPLAY_PORT_INTERRUPT)
2159 i9xx_hpd_irq_handler(dev);
2160 I915_WRITE(VLV_IIR, iir);
2161 }
2162
2163 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
2164 goto out;
2165
2166 ret = IRQ_HANDLED;
2167
2168 if (gt_iir)
2169 snb_gt_irq_handler(dev, dev_priv, gt_iir);
2170 if (pm_iir)
2171 gen6_rps_irq_handler(dev_priv, pm_iir);
2172 /* Call regardless, as some status bits might not be
2173 * signalled in iir */
2174 valleyview_pipestat_irq_handler(dev, iir);
2175 }
2176
2177 out:
2178 return ret;
2179 }
2180
2181 static irqreturn_t cherryview_irq_handler(int irq, void *arg)
2182 {
2183 struct drm_device *dev = arg;
2184 struct drm_i915_private *dev_priv = dev->dev_private;
2185 u32 master_ctl, iir;
2186 irqreturn_t ret = IRQ_NONE;
2187
2188 for (;;) {
2189 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
2190 iir = I915_READ(VLV_IIR);
2191
2192 if (master_ctl == 0 && iir == 0)
2193 break;
2194
2195 ret = IRQ_HANDLED;
2196
2197 I915_WRITE(GEN8_MASTER_IRQ, 0);
2198
2199 /* Find, clear, then process each source of interrupt */
2200
2201 if (iir) {
2202 /* Consume port before clearing IIR or we'll miss events */
2203 if (iir & I915_DISPLAY_PORT_INTERRUPT)
2204 i9xx_hpd_irq_handler(dev);
2205 I915_WRITE(VLV_IIR, iir);
2206 }
2207
2208 gen8_gt_irq_handler(dev, dev_priv, master_ctl);
2209
2210 /* Call regardless, as some status bits might not be
2211 * signalled in iir */
2212 valleyview_pipestat_irq_handler(dev, iir);
2213
2214 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
2215 POSTING_READ(GEN8_MASTER_IRQ);
2216 }
2217
2218 return ret;
2219 }
2220
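/*
 * South display engine (PCH) interrupt handlers: ibx_irq_handler() decodes
 * the Ibex Peak layout, cpt_irq_handler() the CPT/PPT layout. Both forward
 * hotplug triggers to intel_hpd_irq_handler() and report AUX, GMBUS, audio
 * and FIFO underrun events.
 */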
2221 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
2222 {
2223 struct drm_i915_private *dev_priv = dev->dev_private;
2224 int pipe;
2225 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
2226 u32 dig_hotplug_reg;
2227
2228 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2229 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2230
2231 intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx);
2232
2233 if (pch_iir & SDE_AUDIO_POWER_MASK) {
2234 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
2235 SDE_AUDIO_POWER_SHIFT);
2236 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2237 port_name(port));
2238 }
2239
2240 if (pch_iir & SDE_AUX_MASK)
2241 dp_aux_irq_handler(dev);
2242
2243 if (pch_iir & SDE_GMBUS)
2244 gmbus_irq_handler(dev);
2245
2246 if (pch_iir & SDE_AUDIO_HDCP_MASK)
2247 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
2248
2249 if (pch_iir & SDE_AUDIO_TRANS_MASK)
2250 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
2251
2252 if (pch_iir & SDE_POISON)
2253 DRM_ERROR("PCH poison interrupt\n");
2254
2255 if (pch_iir & SDE_FDI_MASK)
2256 for_each_pipe(dev_priv, pipe)
2257 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
2258 pipe_name(pipe),
2259 I915_READ(FDI_RX_IIR(pipe)));
2260
2261 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
2262 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
2263
2264 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
2265 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
2266
2267 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
2268 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
2269 false))
2270 DRM_ERROR("PCH transcoder A FIFO underrun\n");
2271
2272 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
2273 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
2274 false))
2275 DRM_ERROR("PCH transcoder B FIFO underrun\n");
2276 }
2277
2278 static void ivb_err_int_handler(struct drm_device *dev)
2279 {
2280 struct drm_i915_private *dev_priv = dev->dev_private;
2281 u32 err_int = I915_READ(GEN7_ERR_INT);
2282 enum pipe pipe;
2283
2284 if (err_int & ERR_INT_POISON)
2285 DRM_ERROR("Poison interrupt\n");
2286
2287 for_each_pipe(dev_priv, pipe) {
2288 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) {
2289 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
2290 false))
2291 DRM_ERROR("Pipe %c FIFO underrun\n",
2292 pipe_name(pipe));
2293 }
2294
2295 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
2296 if (IS_IVYBRIDGE(dev))
2297 ivb_pipe_crc_irq_handler(dev, pipe);
2298 else
2299 hsw_pipe_crc_irq_handler(dev, pipe);
2300 }
2301 }
2302
2303 I915_WRITE(GEN7_ERR_INT, err_int);
2304 }
2305
2306 static void cpt_serr_int_handler(struct drm_device *dev)
2307 {
2308 struct drm_i915_private *dev_priv = dev->dev_private;
2309 u32 serr_int = I915_READ(SERR_INT);
2310
2311 if (serr_int & SERR_INT_POISON)
2312 DRM_ERROR("PCH poison interrupt\n");
2313
2314 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
2315 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
2316 false))
2317 DRM_ERROR("PCH transcoder A FIFO underrun\n");
2318
2319 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
2320 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
2321 false))
2322 DRM_ERROR("PCH transcoder B FIFO underrun\n");
2323
2324 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
2325 if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
2326 false))
2327 DRM_ERROR("PCH transcoder C FIFO underrun\n");
2328
2329 I915_WRITE(SERR_INT, serr_int);
2330 }
2331
2332 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
2333 {
2334 struct drm_i915_private *dev_priv = dev->dev_private;
2335 int pipe;
2336 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
2337 u32 dig_hotplug_reg;
2338
2339 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2340 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2341
2342 intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt);
2343
2344 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
2345 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
2346 SDE_AUDIO_POWER_SHIFT_CPT);
2347 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2348 port_name(port));
2349 }
2350
2351 if (pch_iir & SDE_AUX_MASK_CPT)
2352 dp_aux_irq_handler(dev);
2353
2354 if (pch_iir & SDE_GMBUS_CPT)
2355 gmbus_irq_handler(dev);
2356
2357 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
2358 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
2359
2360 if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
2361 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
2362
2363 if (pch_iir & SDE_FDI_MASK_CPT)
2364 for_each_pipe(dev_priv, pipe)
2365 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
2366 pipe_name(pipe),
2367 I915_READ(FDI_RX_IIR(pipe)));
2368
2369 if (pch_iir & SDE_ERROR_CPT)
2370 cpt_serr_int_handler(dev);
2371 }
2372
2373 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
2374 {
2375 struct drm_i915_private *dev_priv = dev->dev_private;
2376 enum pipe pipe;
2377
2378 if (de_iir & DE_AUX_CHANNEL_A)
2379 dp_aux_irq_handler(dev);
2380
2381 if (de_iir & DE_GSE)
2382 intel_opregion_asle_intr(dev);
2383
2384 if (de_iir & DE_POISON)
2385 DRM_ERROR("Poison interrupt\n");
2386
2387 for_each_pipe(dev_priv, pipe) {
2388 if (de_iir & DE_PIPE_VBLANK(pipe) &&
2389 intel_pipe_handle_vblank(dev, pipe))
2390 intel_check_page_flip(dev, pipe);
2391
2392 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2393 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
2394 DRM_ERROR("Pipe %c FIFO underrun\n",
2395 pipe_name(pipe));
2396
2397 if (de_iir & DE_PIPE_CRC_DONE(pipe))
2398 i9xx_pipe_crc_irq_handler(dev, pipe);
2399
2400 /* plane/pipes map 1:1 on ilk+ */
2401 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
2402 intel_prepare_page_flip(dev, pipe);
2403 intel_finish_page_flip_plane(dev, pipe);
2404 }
2405 }
2406
2407 /* check event from PCH */
2408 if (de_iir & DE_PCH_EVENT) {
2409 u32 pch_iir = I915_READ(SDEIIR);
2410
2411 if (HAS_PCH_CPT(dev))
2412 cpt_irq_handler(dev, pch_iir);
2413 else
2414 ibx_irq_handler(dev, pch_iir);
2415
2416 /* should clear PCH hotplug event before clearing CPU irq */
2417 I915_WRITE(SDEIIR, pch_iir);
2418 }
2419
2420 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
2421 ironlake_rps_change_irq_handler(dev);
2422 }
2423
2424 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
2425 {
2426 struct drm_i915_private *dev_priv = dev->dev_private;
2427 enum pipe pipe;
2428
2429 if (de_iir & DE_ERR_INT_IVB)
2430 ivb_err_int_handler(dev);
2431
2432 if (de_iir & DE_AUX_CHANNEL_A_IVB)
2433 dp_aux_irq_handler(dev);
2434
2435 if (de_iir & DE_GSE_IVB)
2436 intel_opregion_asle_intr(dev);
2437
2438 for_each_pipe(dev_priv, pipe) {
2439 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
2440 intel_pipe_handle_vblank(dev, pipe))
2441 intel_check_page_flip(dev, pipe);
2442
2443 /* plane/pipes map 1:1 on ilk+ */
2444 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
2445 intel_prepare_page_flip(dev, pipe);
2446 intel_finish_page_flip_plane(dev, pipe);
2447 }
2448 }
2449
2450 /* check event from PCH */
2451 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
2452 u32 pch_iir = I915_READ(SDEIIR);
2453
2454 cpt_irq_handler(dev, pch_iir);
2455
2456 /* clear PCH hotplug event before clearing CPU irq */
2457 I915_WRITE(SDEIIR, pch_iir);
2458 }
2459 }
2460
2461 /*
2462 * To handle irqs with the minimum potential races with fresh interrupts, we:
2463 * 1 - Disable Master Interrupt Control.
2464 * 2 - Find the source(s) of the interrupt.
2465 * 3 - Clear the Interrupt Identity bits (IIR).
2466 * 4 - Process the interrupt(s) that had bits set in the IIRs.
2467 * 5 - Re-enable Master Interrupt Control.
2468 */
2469 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2470 {
2471 struct drm_device *dev = arg;
2472 struct drm_i915_private *dev_priv = dev->dev_private;
2473 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2474 irqreturn_t ret = IRQ_NONE;
2475
2476 /* We get interrupts on unclaimed registers, so check for this before we
2477 * do any I915_{READ,WRITE}. */
2478 intel_uncore_check_errors(dev);
2479
2480 /* disable master interrupt before clearing iir */
2481 de_ier = I915_READ(DEIER);
2482 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2483 POSTING_READ(DEIER);
2484
2485 /* Disable south interrupts. We'll only write to SDEIIR once, so further
2486 * interrupts will be stored on its back queue, and then we'll be
2487 * able to process them after we restore SDEIER (as soon as we restore
2488 * it, we'll get an interrupt if SDEIIR still has something to process
2489 * due to its back queue). */
2490 if (!HAS_PCH_NOP(dev)) {
2491 sde_ier = I915_READ(SDEIER);
2492 I915_WRITE(SDEIER, 0);
2493 POSTING_READ(SDEIER);
2494 }
2495
2496 /* Find, clear, then process each source of interrupt */
2497
2498 gt_iir = I915_READ(GTIIR);
2499 if (gt_iir) {
2500 I915_WRITE(GTIIR, gt_iir);
2501 ret = IRQ_HANDLED;
2502 if (INTEL_INFO(dev)->gen >= 6)
2503 snb_gt_irq_handler(dev, dev_priv, gt_iir);
2504 else
2505 ilk_gt_irq_handler(dev, dev_priv, gt_iir);
2506 }
2507
2508 de_iir = I915_READ(DEIIR);
2509 if (de_iir) {
2510 I915_WRITE(DEIIR, de_iir);
2511 ret = IRQ_HANDLED;
2512 if (INTEL_INFO(dev)->gen >= 7)
2513 ivb_display_irq_handler(dev, de_iir);
2514 else
2515 ilk_display_irq_handler(dev, de_iir);
2516 }
2517
2518 if (INTEL_INFO(dev)->gen >= 6) {
2519 u32 pm_iir = I915_READ(GEN6_PMIIR);
2520 if (pm_iir) {
2521 I915_WRITE(GEN6_PMIIR, pm_iir);
2522 ret = IRQ_HANDLED;
2523 gen6_rps_irq_handler(dev_priv, pm_iir);
2524 }
2525 }
2526
2527 I915_WRITE(DEIER, de_ier);
2528 POSTING_READ(DEIER);
2529 if (!HAS_PCH_NOP(dev)) {
2530 I915_WRITE(SDEIER, sde_ier);
2531 POSTING_READ(SDEIER);
2532 }
2533
2534 return ret;
2535 }
2536
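/*
 * gen8_irq_handler - top level handler for the BDW interrupt hierarchy
 *
 * Master interrupt control is disabled first, then every category flagged
 * in master_ctl (GT, DE misc, DE port, per-pipe, PCH) is read, cleared and
 * processed, and master control is re-enabled at the end.
 */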
2537 static irqreturn_t gen8_irq_handler(int irq, void *arg)
2538 {
2539 struct drm_device *dev = arg;
2540 struct drm_i915_private *dev_priv = dev->dev_private;
2541 u32 master_ctl;
2542 irqreturn_t ret = IRQ_NONE;
2543 uint32_t tmp = 0;
2544 enum pipe pipe;
2545
2546 master_ctl = I915_READ(GEN8_MASTER_IRQ);
2547 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2548 if (!master_ctl)
2549 return IRQ_NONE;
2550
2551 I915_WRITE(GEN8_MASTER_IRQ, 0);
2552 POSTING_READ(GEN8_MASTER_IRQ);
2553
2554 /* Find, clear, then process each source of interrupt */
2555
2556 ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);
2557
2558 if (master_ctl & GEN8_DE_MISC_IRQ) {
2559 tmp = I915_READ(GEN8_DE_MISC_IIR);
2560 if (tmp) {
2561 I915_WRITE(GEN8_DE_MISC_IIR, tmp);
2562 ret = IRQ_HANDLED;
2563 if (tmp & GEN8_DE_MISC_GSE)
2564 intel_opregion_asle_intr(dev);
2565 else
2566 DRM_ERROR("Unexpected DE Misc interrupt\n");
2567 }
2568 else
2569 DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2570 }
2571
2572 if (master_ctl & GEN8_DE_PORT_IRQ) {
2573 tmp = I915_READ(GEN8_DE_PORT_IIR);
2574 if (tmp) {
2575 I915_WRITE(GEN8_DE_PORT_IIR, tmp);
2576 ret = IRQ_HANDLED;
2577 if (tmp & GEN8_AUX_CHANNEL_A)
2578 dp_aux_irq_handler(dev);
2579 else
2580 DRM_ERROR("Unexpected DE Port interrupt\n");
2581 }
2582 else
2583 DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2584 }
2585
2586 for_each_pipe(dev_priv, pipe) {
2587 uint32_t pipe_iir;
2588
2589 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2590 continue;
2591
2592 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2593 if (pipe_iir) {
2594 ret = IRQ_HANDLED;
2595 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
2596 if (pipe_iir & GEN8_PIPE_VBLANK &&
2597 intel_pipe_handle_vblank(dev, pipe))
2598 intel_check_page_flip(dev, pipe);
2599
2600 if (pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE) {
2601 intel_prepare_page_flip(dev, pipe);
2602 intel_finish_page_flip_plane(dev, pipe);
2603 }
2604
2605 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
2606 hsw_pipe_crc_irq_handler(dev, pipe);
2607
2608 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) {
2609 if (intel_set_cpu_fifo_underrun_reporting(dev, pipe,
2610 false))
2611 DRM_ERROR("Pipe %c FIFO underrun\n",
2612 pipe_name(pipe));
2613 }
2614
2615 if (pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS) {
2616 DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
2617 pipe_name(pipe),
2618 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
2619 }
2620 } else
2621 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2622 }
2623
2624 if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
2625 /*
2626 * FIXME(BDW): Assume for now that the new interrupt handling
2627 * scheme also closed the SDE interrupt handling race we've seen
2628 * on older pch-split platforms. But this needs testing.
2629 */
2630 u32 pch_iir = I915_READ(SDEIIR);
2631 if (pch_iir) {
2632 I915_WRITE(SDEIIR, pch_iir);
2633 ret = IRQ_HANDLED;
2634 cpt_irq_handler(dev, pch_iir);
2635 } else
2636 DRM_ERROR("The master control interrupt lied (SDE)!\n");
2637
2638 }
2639
2640 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2641 POSTING_READ(GEN8_MASTER_IRQ);
2642
2643 return ret;
2644 }
2645
2646 static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2647 bool reset_completed)
2648 {
2649 struct intel_engine_cs *ring;
2650 int i;
2651
2652 /*
2653 * Notify all waiters for GPU completion events that reset state has
2654 * been changed, and that they need to restart their wait after
2655 * checking for potential errors (and bail out to drop locks if there is
2656 * a gpu reset pending so that i915_error_work_func can acquire them).
2657 */
2658
2659 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2660 for_each_ring(ring, dev_priv, i)
2661 wake_up_all(&ring->irq_queue);
2662
2663 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2664 wake_up_all(&dev_priv->pending_flip_queue);
2665
2666 /*
2667 * Signal tasks blocked in i915_gem_wait_for_error that the pending
2668 * reset state is cleared.
2669 */
2670 if (reset_completed)
2671 wake_up_all(&dev_priv->gpu_error.reset_queue);
2672 }
2673
2674 /**
2675 * i915_error_work_func - do process context error handling work
2676 * @work: work struct
2677 *
2678 * Fire an error uevent so userspace can see that a hang or error
2679 * was detected.
2680 */
2681 static void i915_error_work_func(struct work_struct *work)
2682 {
2683 struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
2684 work);
2685 struct drm_i915_private *dev_priv =
2686 container_of(error, struct drm_i915_private, gpu_error);
2687 struct drm_device *dev = dev_priv->dev;
2688 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2689 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2690 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
2691 int ret;
2692
2693 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
2694
2695 /*
2696 * Note that there's only one work item which does gpu resets, so we
2697 * need not worry about concurrent gpu resets potentially incrementing
2698 * error->reset_counter twice. We only need to take care of another
2699 * racing irq/hangcheck declaring the gpu dead for a second time. A
2700 * quick check for that is good enough: schedule_work ensures the
2701 * correct ordering between hang detection and this work item, and since
2702 * the reset in-progress bit is only ever set by code outside of this
2703 * work we don't need to worry about any other races.
2704 */
2705 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
2706 DRM_DEBUG_DRIVER("resetting chip\n");
2707 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
2708 reset_event);
2709
2710 /*
2711 * In most cases it's guaranteed that we get here with an RPM
2712 * reference held, for example because there is a pending GPU
2713 * request that won't finish until the reset is done. This
2714 * isn't the case at least when we get here by doing a
2715 * simulated reset via debugfs, so get an RPM reference.
2716 */
2717 intel_runtime_pm_get(dev_priv);
2718 /*
2719 * All state reset _must_ be completed before we update the
2720 * reset counter, for otherwise waiters might miss the reset
2721 * pending state and not properly drop locks, resulting in
2722 * deadlocks with the reset work.
2723 */
2724 ret = i915_reset(dev);
2725
2726 intel_display_handle_reset(dev);
2727
2728 intel_runtime_pm_put(dev_priv);
2729
2730 if (ret == 0) {
2731 /*
2732 * After all the gem state is reset, increment the reset
2733 * counter and wake up everyone waiting for the reset to
2734 * complete.
2735 *
2736 * Since unlock operations are a one-sided barrier only,
2737 * we need to insert a barrier here to order any seqno
2738 * updates before
2739 * the counter increment.
2740 */
2741 smp_mb__before_atomic();
2742 atomic_inc(&dev_priv->gpu_error.reset_counter);
2743
2744 kobject_uevent_env(&dev->primary->kdev->kobj,
2745 KOBJ_CHANGE, reset_done_event);
2746 } else {
2747 atomic_set_mask(I915_WEDGED, &error->reset_counter);
2748 }
2749
2750 /*
2751 * Note: The wake_up also serves as a memory barrier so that
2752 * waiters see the updated value of the reset counter atomic_t.
2753 */
2754 i915_error_wake_up(dev_priv, true);
2755 }
2756 }
2757
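/*
 * i915_report_and_clear_eir - dump and acknowledge the error identity register
 *
 * Logs the asserted EIR sources (page table, memory refresh and instruction
 * errors) along with the related per-platform registers, writes the bits
 * back to clear them, and masks any error that remains stuck via EMR.
 */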
2758 static void i915_report_and_clear_eir(struct drm_device *dev)
2759 {
2760 struct drm_i915_private *dev_priv = dev->dev_private;
2761 uint32_t instdone[I915_NUM_INSTDONE_REG];
2762 u32 eir = I915_READ(EIR);
2763 int pipe, i;
2764
2765 if (!eir)
2766 return;
2767
2768 pr_err("render error detected, EIR: 0x%08x\n", eir);
2769
2770 i915_get_extra_instdone(dev, instdone);
2771
2772 if (IS_G4X(dev)) {
2773 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2774 u32 ipeir = I915_READ(IPEIR_I965);
2775
2776 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2777 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2778 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2779 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2780 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
2781 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2782 I915_WRITE(IPEIR_I965, ipeir);
2783 POSTING_READ(IPEIR_I965);
2784 }
2785 if (eir & GM45_ERROR_PAGE_TABLE) {
2786 u32 pgtbl_err = I915_READ(PGTBL_ER);
2787 pr_err("page table error\n");
2788 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
2789 I915_WRITE(PGTBL_ER, pgtbl_err);
2790 POSTING_READ(PGTBL_ER);
2791 }
2792 }
2793
2794 if (!IS_GEN2(dev)) {
2795 if (eir & I915_ERROR_PAGE_TABLE) {
2796 u32 pgtbl_err = I915_READ(PGTBL_ER);
2797 pr_err("page table error\n");
2798 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
2799 I915_WRITE(PGTBL_ER, pgtbl_err);
2800 POSTING_READ(PGTBL_ER);
2801 }
2802 }
2803
2804 if (eir & I915_ERROR_MEMORY_REFRESH) {
2805 pr_err("memory refresh error:\n");
2806 for_each_pipe(dev_priv, pipe)
2807 pr_err("pipe %c stat: 0x%08x\n",
2808 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
2809 /* pipestat has already been acked */
2810 }
2811 if (eir & I915_ERROR_INSTRUCTION) {
2812 pr_err("instruction error\n");
2813 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
2814 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2815 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2816 if (INTEL_INFO(dev)->gen < 4) {
2817 u32 ipeir = I915_READ(IPEIR);
2818
2819 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
2820 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
2821 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
2822 I915_WRITE(IPEIR, ipeir);
2823 POSTING_READ(IPEIR);
2824 } else {
2825 u32 ipeir = I915_READ(IPEIR_I965);
2826
2827 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2828 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2829 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
2830 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2831 I915_WRITE(IPEIR_I965, ipeir);
2832 POSTING_READ(IPEIR_I965);
2833 }
2834 }
2835
2836 I915_WRITE(EIR, eir);
2837 POSTING_READ(EIR);
2838 eir = I915_READ(EIR);
2839 if (eir) {
2840 /*
2841 * some errors might have become stuck,
2842 * mask them.
2843 */
2844 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2845 I915_WRITE(EMR, I915_READ(EMR) | eir);
2846 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2847 }
2848 }
2849
2850 /**
2851 * i915_handle_error - handle an error interrupt
2852 * @dev: drm device
2853 *
2854 * Do some basic checking of register state at error interrupt time and
2855 * dump it to the syslog. Also call i915_capture_error_state() to make
2856 * sure we get a record and make it available in debugfs. Fire a uevent
2857 * so userspace knows something bad happened (should trigger collection
2858 * of a ring dump etc.).
2859 */
2860 void i915_handle_error(struct drm_device *dev, bool wedged,
2861 const char *fmt, ...)
2862 {
2863 struct drm_i915_private *dev_priv = dev->dev_private;
2864 va_list args;
2865 char error_msg[80];
2866
2867 va_start(args, fmt);
2868 vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2869 va_end(args);
2870
2871 i915_capture_error_state(dev, wedged, error_msg);
2872 i915_report_and_clear_eir(dev);
2873
2874 if (wedged) {
2875 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
2876 &dev_priv->gpu_error.reset_counter);
2877
2878 /*
2879 * Wakeup waiting processes so that the reset work function
2880 * i915_error_work_func doesn't deadlock trying to grab various
2881 * locks. By bumping the reset counter first, the woken
2882 * processes will see a reset in progress and back off,
2883 * releasing their locks and then wait for the reset completion.
2884 * We must do this for _all_ gpu waiters that might hold locks
2885 * that the reset work needs to acquire.
2886 *
2887 * Note: The wake_up serves as the required memory barrier to
2888 * ensure that the waiters see the updated value of the reset
2889 * counter atomic_t.
2890 */
2891 i915_error_wake_up(dev_priv, false);
2892 }
2893
2894 /*
2895 * Our reset work can grab modeset locks (since it needs to reset the
2896 * state of outstanding pageflips). Hence it must not be run on our own
2897 * dev_priv->wq work queue for otherwise the flush_work in the pageflip
2898 * code will deadlock.
2899 */
2900 schedule_work(&dev_priv->gpu_error.work);
2901 }
2902
2903 /* Called from drm generic code, passed 'crtc' which
2904 * we use as a pipe index
2905 */
2906 static int i915_enable_vblank(struct drm_device *dev, int pipe)
2907 {
2908 struct drm_i915_private *dev_priv = dev->dev_private;
2909 unsigned long irqflags;
2910
2911 if (!i915_pipe_enabled(dev, pipe))
2912 return -EINVAL;
2913
2914 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2915 if (INTEL_INFO(dev)->gen >= 4)
2916 i915_enable_pipestat(dev_priv, pipe,
2917 PIPE_START_VBLANK_INTERRUPT_STATUS);
2918 else
2919 i915_enable_pipestat(dev_priv, pipe,
2920 PIPE_VBLANK_INTERRUPT_STATUS);
2921 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2922
2923 return 0;
2924 }
2925
2926 static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
2927 {
2928 struct drm_i915_private *dev_priv = dev->dev_private;
2929 unsigned long irqflags;
2930 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2931 DE_PIPE_VBLANK(pipe);
2932
2933 if (!i915_pipe_enabled(dev, pipe))
2934 return -EINVAL;
2935
2936 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2937 ironlake_enable_display_irq(dev_priv, bit);
2938 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2939
2940 return 0;
2941 }
2942
2943 static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
2944 {
2945 struct drm_i915_private *dev_priv = dev->dev_private;
2946 unsigned long irqflags;
2947
2948 if (!i915_pipe_enabled(dev, pipe))
2949 return -EINVAL;
2950
2951 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2952 i915_enable_pipestat(dev_priv, pipe,
2953 PIPE_START_VBLANK_INTERRUPT_STATUS);
2954 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2955
2956 return 0;
2957 }
2958
2959 static int gen8_enable_vblank(struct drm_device *dev, int pipe)
2960 {
2961 struct drm_i915_private *dev_priv = dev->dev_private;
2962 unsigned long irqflags;
2963
2964 if (!i915_pipe_enabled(dev, pipe))
2965 return -EINVAL;
2966
2967 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2968 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
2969 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2970 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2971 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2972 return 0;
2973 }
2974
2975 /* Called from drm generic code, passed 'crtc' which
2976 * we use as a pipe index
2977 */
2978 static void i915_disable_vblank(struct drm_device *dev, int pipe)
2979 {
2980 struct drm_i915_private *dev_priv = dev->dev_private;
2981 unsigned long irqflags;
2982
2983 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2984 i915_disable_pipestat(dev_priv, pipe,
2985 PIPE_VBLANK_INTERRUPT_STATUS |
2986 PIPE_START_VBLANK_INTERRUPT_STATUS);
2987 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2988 }
2989
2990 static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
2991 {
2992 struct drm_i915_private *dev_priv = dev->dev_private;
2993 unsigned long irqflags;
2994 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2995 DE_PIPE_VBLANK(pipe);
2996
2997 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2998 ironlake_disable_display_irq(dev_priv, bit);
2999 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3000 }
3001
3002 static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
3003 {
3004 struct drm_i915_private *dev_priv = dev->dev_private;
3005 unsigned long irqflags;
3006
3007 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3008 i915_disable_pipestat(dev_priv, pipe,
3009 PIPE_START_VBLANK_INTERRUPT_STATUS);
3010 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3011 }
3012
3013 static void gen8_disable_vblank(struct drm_device *dev, int pipe)
3014 {
3015 struct drm_i915_private *dev_priv = dev->dev_private;
3016 unsigned long irqflags;
3017
3018 if (!i915_pipe_enabled(dev, pipe))
3019 return;
3020
3021 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3022 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
3023 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
3024 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
3025 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3026 }
3027
3028 static u32
3029 ring_last_seqno(struct intel_engine_cs *ring)
3030 {
3031 return list_entry(ring->request_list.prev,
3032 struct drm_i915_gem_request, list)->seqno;
3033 }
3034
3035 static bool
3036 ring_idle(struct intel_engine_cs *ring, u32 seqno)
3037 {
3038 return (list_empty(&ring->request_list) ||
3039 i915_seqno_passed(seqno, ring_last_seqno(ring)));
3040 }
3041
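/*
 * ipehr_is_semaphore_wait - does IPEHR contain a semaphore wait command?
 *
 * On gen8+ the opcode field (ipehr >> 23) must be 0x1c, i.e.
 * MI_SEMAPHORE_WAIT; on earlier gens we match MI_SEMAPHORE_MBOX with the
 * compare/register flags while ignoring the MI_SEMAPHORE_SYNC_MASK target.
 */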
3042 static bool
3043 ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
3044 {
3045 if (INTEL_INFO(dev)->gen >= 8) {
3046 return (ipehr >> 23) == 0x1c;
3047 } else {
3048 ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
3049 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
3050 MI_SEMAPHORE_REGISTER);
3051 }
3052 }
3053
3054 static struct intel_engine_cs *
3055 semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
3056 {
3057 struct drm_i915_private *dev_priv = ring->dev->dev_private;
3058 struct intel_engine_cs *signaller;
3059 int i;
3060
3061 if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
3062 for_each_ring(signaller, dev_priv, i) {
3063 if (ring == signaller)
3064 continue;
3065
3066 if (offset == signaller->semaphore.signal_ggtt[ring->id])
3067 return signaller;
3068 }
3069 } else {
3070 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
3071
3072 for_each_ring(signaller, dev_priv, i) {
3073 if (ring == signaller)
3074 continue;
3075
3076 if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
3077 return signaller;
3078 }
3079 }
3080
3081 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
3082 ring->id, ipehr, offset);
3083
3084 return NULL;
3085 }
3086
3087 static struct intel_engine_cs *
3088 semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
3089 {
3090 struct drm_i915_private *dev_priv = ring->dev->dev_private;
3091 u32 cmd, ipehr, head;
3092 u64 offset = 0;
3093 int i, backwards;
3094
3095 ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
3096 if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
3097 return NULL;
3098
3099 /*
3100 * HEAD is likely pointing to the dword after the actual command,
3101 * so scan backwards until we find the MBOX. But limit it to just 3
3102 * or 4 dwords depending on the semaphore wait command size.
3103 * Note that we don't care about ACTHD here since that might
3104 * point at a batch, and semaphores are always emitted into the
3105 * ringbuffer itself.
3106 */
3107 head = I915_READ_HEAD(ring) & HEAD_ADDR;
3108 backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
3109
3110 for (i = backwards; i; --i) {
3111 /*
3112 * Be paranoid and presume the hw has gone off into the wild -
3113 * our ring is smaller than what the hardware (and hence
3114 * HEAD_ADDR) allows. Also handles wrap-around.
3115 */
3116 head &= ring->buffer->size - 1;
3117
3118 /* This here seems to blow up */
3119 cmd = ioread32(ring->buffer->virtual_start + head);
3120 if (cmd == ipehr)
3121 break;
3122
3123 head -= 4;
3124 }
3125
3126 if (!i)
3127 return NULL;
3128
3129 *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
3130 if (INTEL_INFO(ring->dev)->gen >= 8) {
3131 offset = ioread32(ring->buffer->virtual_start + head + 12);
3132 offset <<= 32;
3133 offset |= ioread32(ring->buffer->virtual_start + head + 8);
3134 }
3135 return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
3136 }
3137
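/*
 * semaphore_passed - check whether the ring we wait on has signalled
 *
 * Returns 1 if the signaller already passed the awaited seqno, 0 if the
 * wait is still legitimate, and -1 if no signaller is found or a
 * (possibly recursive) semaphore deadlock is suspected.
 */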
3138 static int semaphore_passed(struct intel_engine_cs *ring)
3139 {
3140 struct drm_i915_private *dev_priv = ring->dev->dev_private;
3141 struct intel_engine_cs *signaller;
3142 u32 seqno;
3143
3144 ring->hangcheck.deadlock++;
3145
3146 signaller = semaphore_waits_for(ring, &seqno);
3147 if (signaller == NULL)
3148 return -1;
3149
3150 /* Prevent pathological recursion due to driver bugs */
3151 if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
3152 return -1;
3153
3154 if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
3155 return 1;
3156
3157 /* cursory check for an unkickable deadlock */
3158 if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
3159 semaphore_passed(signaller) < 0)
3160 return -1;
3161
3162 return 0;
3163 }
3164
3165 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
3166 {
3167 struct intel_engine_cs *ring;
3168 int i;
3169
3170 for_each_ring(ring, dev_priv, i)
3171 ring->hangcheck.deadlock = 0;
3172 }
3173
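/*
 * ring_stuck - classify a ring whose seqno has stopped advancing
 *
 * ACTHD movement still counts as active; otherwise we try to kick stuck
 * WAIT_FOR_EVENT or semaphore waits before declaring the ring hung.
 */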
3174 static enum intel_ring_hangcheck_action
3175 ring_stuck(struct intel_engine_cs *ring, u64 acthd)
3176 {
3177 struct drm_device *dev = ring->dev;
3178 struct drm_i915_private *dev_priv = dev->dev_private;
3179 u32 tmp;
3180
3181 if (acthd != ring->hangcheck.acthd) {
3182 if (acthd > ring->hangcheck.max_acthd) {
3183 ring->hangcheck.max_acthd = acthd;
3184 return HANGCHECK_ACTIVE;
3185 }
3186
3187 return HANGCHECK_ACTIVE_LOOP;
3188 }
3189
3190 if (IS_GEN2(dev))
3191 return HANGCHECK_HUNG;
3192
3193 /* Is the chip hanging on a WAIT_FOR_EVENT?
3194 * If so we can simply poke the RB_WAIT bit
3195 * and break the hang. This should work on
3196 * all but the second generation chipsets.
3197 */
3198 tmp = I915_READ_CTL(ring);
3199 if (tmp & RING_WAIT) {
3200 i915_handle_error(dev, false,
3201 "Kicking stuck wait on %s",
3202 ring->name);
3203 I915_WRITE_CTL(ring, tmp);
3204 return HANGCHECK_KICK;
3205 }
3206
3207 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
3208 switch (semaphore_passed(ring)) {
3209 default:
3210 return HANGCHECK_HUNG;
3211 case 1:
3212 i915_handle_error(dev, false,
3213 "Kicking stuck semaphore on %s",
3214 ring->name);
3215 I915_WRITE_CTL(ring, tmp);
3216 return HANGCHECK_KICK;
3217 case 0:
3218 return HANGCHECK_WAIT;
3219 }
3220 }
3221
3222 return HANGCHECK_HUNG;
3223 }
3224
3225 /**
3226 * This is called when the chip hasn't reported back with completed
3227 * batchbuffers in a long time. We keep track of per-ring seqno progress and
3228 * if there is no progress, the hangcheck score for that ring is increased.
3229 * Further, acthd is inspected to see if the ring is stuck. In the stuck case
3230 * we kick the ring. If we see no progress on three subsequent calls
3231 * we assume the chip is wedged and try to fix it by resetting the chip.
3232 */
3233 static void i915_hangcheck_elapsed(unsigned long data)
3234 {
3235 struct drm_device *dev = (struct drm_device *)data;
3236 struct drm_i915_private *dev_priv = dev->dev_private;
3237 struct intel_engine_cs *ring;
3238 int i;
3239 int busy_count = 0, rings_hung = 0;
3240 bool stuck[I915_NUM_RINGS] = { 0 };
3241 #define BUSY 1
3242 #define KICK 5
3243 #define HUNG 20
3244
3245 if (!i915.enable_hangcheck)
3246 return;
3247
3248 for_each_ring(ring, dev_priv, i) {
3249 u64 acthd;
3250 u32 seqno;
3251 bool busy = true;
3252
3253 semaphore_clear_deadlocks(dev_priv);
3254
3255 seqno = ring->get_seqno(ring, false);
3256 acthd = intel_ring_get_active_head(ring);
3257
3258 if (ring->hangcheck.seqno == seqno) {
3259 if (ring_idle(ring, seqno)) {
3260 ring->hangcheck.action = HANGCHECK_IDLE;
3261
3262 if (waitqueue_active(&ring->irq_queue)) {
3263 /* Issue a wake-up to catch stuck h/w. */
3264 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
3265 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
3266 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
3267 ring->name);
3268 else
3269 DRM_INFO("Fake missed irq on %s\n",
3270 ring->name);
3271 wake_up_all(&ring->irq_queue);
3272 }
3273 /* Safeguard against driver failure */
3274 ring->hangcheck.score += BUSY;
3275 } else
3276 busy = false;
3277 } else {
3278 /* We always increment the hangcheck score
3279 * if the ring is busy and still processing
3280 * the same request, so that no single request
3281 * can run indefinitely (such as a chain of
3282 * batches). The only time we do not increment
3283 * the hangcheck score on this ring is when it
3284 * is in a legitimate wait for another
3285 * ring. In that case the waiting ring is a
3286 * victim and we want to be sure we catch the
3287 * right culprit. Then every time we do kick
3288 * the ring, add a small increment to the
3289 * score so that we can catch a batch that is
3290 * being repeatedly kicked and so responsible
3291 * for stalling the machine.
3292 */
3293 ring->hangcheck.action = ring_stuck(ring,
3294 acthd);
3295
3296 switch (ring->hangcheck.action) {
3297 case HANGCHECK_IDLE:
3298 case HANGCHECK_WAIT:
3299 case HANGCHECK_ACTIVE:
3300 break;
3301 case HANGCHECK_ACTIVE_LOOP:
3302 ring->hangcheck.score += BUSY;
3303 break;
3304 case HANGCHECK_KICK:
3305 ring->hangcheck.score += KICK;
3306 break;
3307 case HANGCHECK_HUNG:
3308 ring->hangcheck.score += HUNG;
3309 stuck[i] = true;
3310 break;
3311 }
3312 }
3313 } else {
3314 ring->hangcheck.action = HANGCHECK_ACTIVE;
3315
3316 /* Gradually reduce the count so that we catch DoS
3317 * attempts across multiple batches.
3318 */
3319 if (ring->hangcheck.score > 0)
3320 ring->hangcheck.score--;
3321
3322 ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
3323 }
3324
3325 ring->hangcheck.seqno = seqno;
3326 ring->hangcheck.acthd = acthd;
3327 busy_count += busy;
3328 }
3329
3330 for_each_ring(ring, dev_priv, i) {
3331 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
3332 DRM_INFO("%s on %s\n",
3333 stuck[i] ? "stuck" : "no progress",
3334 ring->name);
3335 rings_hung++;
3336 }
3337 }
3338
3339 if (rings_hung)
3340 return i915_handle_error(dev, true, "Ring hung");
3341
3342 if (busy_count)
3343 /* Reset timer in case chip hangs without another request
3344 * being added */
3345 i915_queue_hangcheck(dev);
3346 }
3347
3348 void i915_queue_hangcheck(struct drm_device *dev)
3349 {
3350 struct drm_i915_private *dev_priv = dev->dev_private;
3351 if (!i915.enable_hangcheck)
3352 return;
3353
3354 mod_timer(&dev_priv->gpu_error.hangcheck_timer,
3355 round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
3356 }
3357
3358 static void ibx_irq_reset(struct drm_device *dev)
3359 {
3360 struct drm_i915_private *dev_priv = dev->dev_private;
3361
3362 if (HAS_PCH_NOP(dev))
3363 return;
3364
3365 GEN5_IRQ_RESET(SDE);
3366
3367 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
3368 I915_WRITE(SERR_INT, 0xffffffff);
3369 }
3370
3371 /*
3372 * SDEIER is also touched by the interrupt handler to work around missed PCH
3373 * interrupts. Hence we can't update it after the interrupt handler is enabled -
3374 * instead we unconditionally enable all PCH interrupt sources here, but then
3375 * only unmask them as needed with SDEIMR.
3376 *
3377 * This function needs to be called before interrupts are enabled.
3378 */
3379 static void ibx_irq_pre_postinstall(struct drm_device *dev)
3380 {
3381 struct drm_i915_private *dev_priv = dev->dev_private;
3382
3383 if (HAS_PCH_NOP(dev))
3384 return;
3385
3386 WARN_ON(I915_READ(SDEIER) != 0);
3387 I915_WRITE(SDEIER, 0xffffffff);
3388 POSTING_READ(SDEIER);
3389 }
3390
3391 static void gen5_gt_irq_reset(struct drm_device *dev)
3392 {
3393 struct drm_i915_private *dev_priv = dev->dev_private;
3394
3395 GEN5_IRQ_RESET(GT);
3396 if (INTEL_INFO(dev)->gen >= 6)
3397 GEN5_IRQ_RESET(GEN6_PM);
3398 }
3399
3400 /* drm_dma.h hooks
3401 */
3402 static void ironlake_irq_reset(struct drm_device *dev)
3403 {
3404 struct drm_i915_private *dev_priv = dev->dev_private;
3405
3406 I915_WRITE(HWSTAM, 0xffffffff);
3407
3408 GEN5_IRQ_RESET(DE);
3409 if (IS_GEN7(dev))
3410 I915_WRITE(GEN7_ERR_INT, 0xffffffff);
3411
3412 gen5_gt_irq_reset(dev);
3413
3414 ibx_irq_reset(dev);
3415 }
3416
3417 static void valleyview_irq_preinstall(struct drm_device *dev)
3418 {
3419 struct drm_i915_private *dev_priv = dev->dev_private;
3420 int pipe;
3421
3422 /* VLV magic */
3423 I915_WRITE(VLV_IMR, 0);
3424 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
3425 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
3426 I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
3427
3428 /* and GT */
3429 I915_WRITE(GTIIR, I915_READ(GTIIR));
3430 I915_WRITE(GTIIR, I915_READ(GTIIR));
3431
3432 gen5_gt_irq_reset(dev);
3433
3434 I915_WRITE(DPINVGTT, 0xff);
3435
3436 I915_WRITE(PORT_HOTPLUG_EN, 0);
3437 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3438 for_each_pipe(dev_priv, pipe)
3439 I915_WRITE(PIPESTAT(pipe), 0xffff);
3440 I915_WRITE(VLV_IIR, 0xffffffff);
3441 I915_WRITE(VLV_IMR, 0xffffffff);
3442 I915_WRITE(VLV_IER, 0x0);
3443 POSTING_READ(VLV_IER);
3444 }
3445
3446 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3447 {
3448 GEN8_IRQ_RESET_NDX(GT, 0);
3449 GEN8_IRQ_RESET_NDX(GT, 1);
3450 GEN8_IRQ_RESET_NDX(GT, 2);
3451 GEN8_IRQ_RESET_NDX(GT, 3);
3452 }
3453
3454 static void gen8_irq_reset(struct drm_device *dev)
3455 {
3456 struct drm_i915_private *dev_priv = dev->dev_private;
3457 int pipe;
3458
3459 I915_WRITE(GEN8_MASTER_IRQ, 0);
3460 POSTING_READ(GEN8_MASTER_IRQ);
3461
3462 gen8_gt_irq_reset(dev_priv);
3463
3464 for_each_pipe(dev_priv, pipe)
3465 if (intel_display_power_enabled(dev_priv,
3466 POWER_DOMAIN_PIPE(pipe)))
3467 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3468
3469 GEN5_IRQ_RESET(GEN8_DE_PORT_);
3470 GEN5_IRQ_RESET(GEN8_DE_MISC_);
3471 GEN5_IRQ_RESET(GEN8_PCU_);
3472
3473 ibx_irq_reset(dev);
3474 }
3475
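/*
 * Called after a display power well has been re-enabled: reprogram the
 * pipe B/C interrupt registers from the saved software masks under
 * irq_lock (register state is not retained while the well is powered off).
 */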
3476 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
3477 {
3478 spin_lock_irq(&dev_priv->irq_lock);
3479 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
3480 ~dev_priv->de_irq_mask[PIPE_B]);
3481 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
3482 ~dev_priv->de_irq_mask[PIPE_C]);
3483 spin_unlock_irq(&dev_priv->irq_lock);
3484 }
3485
3486 static void cherryview_irq_preinstall(struct drm_device *dev)
3487 {
3488 struct drm_i915_private *dev_priv = dev->dev_private;
3489 int pipe;
3490
3491 I915_WRITE(GEN8_MASTER_IRQ, 0);
3492 POSTING_READ(GEN8_MASTER_IRQ);
3493
3494 gen8_gt_irq_reset(dev_priv);
3495
3496 GEN5_IRQ_RESET(GEN8_PCU_);
3497
3498 POSTING_READ(GEN8_PCU_IIR);
3499
3500 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3501
3502 I915_WRITE(PORT_HOTPLUG_EN, 0);
3503 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3504
3505 for_each_pipe(dev_priv, pipe)
3506 I915_WRITE(PIPESTAT(pipe), 0xffff);
3507
3508 I915_WRITE(VLV_IMR, 0xffffffff);
3509 I915_WRITE(VLV_IER, 0x0);
3510 I915_WRITE(VLV_IIR, 0xffffffff);
3511 POSTING_READ(VLV_IIR);
3512 }
3513
3514 static void ibx_hpd_irq_setup(struct drm_device *dev)
3515 {
3516 struct drm_i915_private *dev_priv = dev->dev_private;
3517 struct intel_encoder *intel_encoder;
3518 u32 hotplug_irqs, hotplug, enabled_irqs = 0;
3519
3520 if (HAS_PCH_IBX(dev)) {
3521 hotplug_irqs = SDE_HOTPLUG_MASK;
3522 for_each_intel_encoder(dev, intel_encoder)
3523 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3524 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
3525 } else {
3526 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3527 for_each_intel_encoder(dev, intel_encoder)
3528 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3529 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
3530 }
3531
3532 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3533
3534 /*
3535 * Enable digital hotplug on the PCH, and configure the DP short pulse
3536 * duration to 2ms (which is the minimum in the Display Port spec)
3537 *
3538 * This register is the same on all known PCH chips.
3539 */
3540 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3541 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3542 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3543 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3544 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3545 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3546 }
3547
3548 static void ibx_irq_postinstall(struct drm_device *dev)
3549 {
3550 struct drm_i915_private *dev_priv = dev->dev_private;
3551 u32 mask;
3552
3553 if (HAS_PCH_NOP(dev))
3554 return;
3555
3556 if (HAS_PCH_IBX(dev))
3557 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3558 else
3559 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3560
3561 GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
3562 I915_WRITE(SDEIMR, ~mask);
3563 }
3564
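/*
 * gen5_gt_irq_postinstall - enable GT (render/BSD/blitter) user interrupts
 * and, on gen6+, the PM/RPS interrupt sources. L3 parity errors are left
 * permanently unmasked in gt_irq_mask on platforms with L3 DPF.
 */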
3565 static void gen5_gt_irq_postinstall(struct drm_device *dev)
3566 {
3567 struct drm_i915_private *dev_priv = dev->dev_private;
3568 u32 pm_irqs, gt_irqs;
3569
3570 pm_irqs = gt_irqs = 0;
3571
3572 dev_priv->gt_irq_mask = ~0;
3573 if (HAS_L3_DPF(dev)) {
3574 /* L3 parity interrupt is always unmasked. */
3575 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
3576 gt_irqs |= GT_PARITY_ERROR(dev);
3577 }
3578
3579 gt_irqs |= GT_RENDER_USER_INTERRUPT;
3580 if (IS_GEN5(dev)) {
3581 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
3582 ILK_BSD_USER_INTERRUPT;
3583 } else {
3584 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3585 }
3586
3587 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
3588
3589 if (INTEL_INFO(dev)->gen >= 6) {
3590 pm_irqs |= dev_priv->pm_rps_events;
3591
3592 if (HAS_VEBOX(dev))
3593 pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3594
3595 dev_priv->pm_irq_mask = 0xffffffff;
3596 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
3597 }
3598 }
3599
3600 static int ironlake_irq_postinstall(struct drm_device *dev)
3601 {
3602 struct drm_i915_private *dev_priv = dev->dev_private;
3603 u32 display_mask, extra_mask;
3604
3605 if (INTEL_INFO(dev)->gen >= 7) {
3606 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3607 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3608 DE_PLANEB_FLIP_DONE_IVB |
3609 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
3610 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3611 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
3612 } else {
3613 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3614 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
3615 DE_AUX_CHANNEL_A |
3616 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3617 DE_POISON);
3618 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3619 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
3620 }
3621
3622 dev_priv->irq_mask = ~display_mask;
3623
3624 I915_WRITE(HWSTAM, 0xeffe);
3625
3626 ibx_irq_pre_postinstall(dev);
3627
3628 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3629
3630 gen5_gt_irq_postinstall(dev);
3631
3632 ibx_irq_postinstall(dev);
3633
3634 if (IS_IRONLAKE_M(dev)) {
3635 /* Enable PCU event interrupts
3636 *
3637 * spinlocking not required here for correctness since interrupt
3638 * setup is guaranteed to run in single-threaded context. But we
3639 * need it to make the assert_spin_locked happy. */
3640 spin_lock_irq(&dev_priv->irq_lock);
3641 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
3642 spin_unlock_irq(&dev_priv->irq_lock);
3643 }
3644
3645 return 0;
3646 }
3647
3648 static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
3649 {
3650 u32 pipestat_mask;
3651 u32 iir_mask;
3652
3653 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3654 PIPE_FIFO_UNDERRUN_STATUS;
3655
3656 I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
3657 I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
3658 POSTING_READ(PIPESTAT(PIPE_A));
3659
3660 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3661 PIPE_CRC_DONE_INTERRUPT_STATUS;
3662
3663 i915_enable_pipestat(dev_priv, PIPE_A, pipestat_mask |
3664 PIPE_GMBUS_INTERRUPT_STATUS);
3665 i915_enable_pipestat(dev_priv, PIPE_B, pipestat_mask);
3666
3667 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3668 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3669 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3670 dev_priv->irq_mask &= ~iir_mask;
3671
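/* VLV_IIR is written twice below, presumably because a second event can
 * already be latched behind the first (the reset macros clear IIR twice
 * for the same reason).
 */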
3672 I915_WRITE(VLV_IIR, iir_mask);
3673 I915_WRITE(VLV_IIR, iir_mask);
3674 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3675 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3676 POSTING_READ(VLV_IER);
3677 }
3678
3679 static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
3680 {
3681 u32 pipestat_mask;
3682 u32 iir_mask;
3683
3684 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3685 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3686 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3687
3688 dev_priv->irq_mask |= iir_mask;
3689 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3690 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3691 I915_WRITE(VLV_IIR, iir_mask);
3692 I915_WRITE(VLV_IIR, iir_mask);
3693 POSTING_READ(VLV_IIR);
3694
3695 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3696 PIPE_CRC_DONE_INTERRUPT_STATUS;
3697
3698 i915_disable_pipestat(dev_priv, PIPE_A, pipestat_mask |
3699 PIPE_GMBUS_INTERRUPT_STATUS);
3700 i915_disable_pipestat(dev_priv, PIPE_B, pipestat_mask);
3701
3702 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3703 PIPE_FIFO_UNDERRUN_STATUS;
3704 I915_WRITE(PIPESTAT(PIPE_A), pipestat_mask);
3705 I915_WRITE(PIPESTAT(PIPE_B), pipestat_mask);
3706 POSTING_READ(PIPESTAT(PIPE_A));
3707 }
3708
3709 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3710 {
3711 assert_spin_locked(&dev_priv->irq_lock);
3712
3713 if (dev_priv->display_irqs_enabled)
3714 return;
3715
3716 dev_priv->display_irqs_enabled = true;
3717
3718 if (intel_irqs_enabled(dev_priv))
3719 valleyview_display_irqs_install(dev_priv);
3720 }
3721
3722 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3723 {
3724 assert_spin_locked(&dev_priv->irq_lock);
3725
3726 if (!dev_priv->display_irqs_enabled)
3727 return;
3728
3729 dev_priv->display_irqs_enabled = false;
3730
3731 if (intel_irqs_enabled(dev_priv))
3732 valleyview_display_irqs_uninstall(dev_priv);
3733 }
3734
3735 static int valleyview_irq_postinstall(struct drm_device *dev)
3736 {
3737 struct drm_i915_private *dev_priv = dev->dev_private;
3738
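/* Start fully masked; if display interrupts are enabled,
 * valleyview_display_irqs_install() below unmasks the pipe/port events
 * after setting up the pipestat sources.
 */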
3739 dev_priv->irq_mask = ~0;
3740
3741 I915_WRITE(PORT_HOTPLUG_EN, 0);
3742 POSTING_READ(PORT_HOTPLUG_EN);
3743
3744 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3745 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3746 I915_WRITE(VLV_IIR, 0xffffffff);
3747 POSTING_READ(VLV_IER);
3748
3749 /* Interrupt setup is already guaranteed to be single-threaded, this is
3750 * just to make the assert_spin_locked check happy. */
3751 spin_lock_irq(&dev_priv->irq_lock);
3752 if (dev_priv->display_irqs_enabled)
3753 valleyview_display_irqs_install(dev_priv);
3754 spin_unlock_irq(&dev_priv->irq_lock);
3755
3756 I915_WRITE(VLV_IIR, 0xffffffff);
3757 I915_WRITE(VLV_IIR, 0xffffffff);
3758
3759 gen5_gt_irq_postinstall(dev);
3760
3761 /* ack & enable invalid PTE error interrupts */
3762 #if 0 /* FIXME: add support to irq handler for checking these bits */
3763 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3764 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
3765 #endif
3766
3767 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3768
3769 return 0;
3770 }
3771
3772 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3773 {
3774 /* These are interrupts we'll toggle with the ring mask register */
3775 uint32_t gt_interrupts[] = {
3776 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3777 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3778 GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
3779 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3780 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3781 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3782 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3783 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3784 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3785 0,
3786 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3787 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3788 };
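/* Each array index corresponds to a GT interrupt register bank used in
 * the GEN8_IRQ_INIT_NDX() calls below: [0] RCS/BCS, [1] VCS1/VCS2,
 * [2] PM (programmed separately from pm_rps_events), [3] VECS.
 */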
3789
3790 dev_priv->pm_irq_mask = 0xffffffff;
3791 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3792 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
3793 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, dev_priv->pm_rps_events);
3794 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
3795 }
3796
3797 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3798 {
3799 uint32_t de_pipe_masked = GEN8_PIPE_PRIMARY_FLIP_DONE |
3800 GEN8_PIPE_CDCLK_CRC_DONE |
3801 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3802 uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3803 GEN8_PIPE_FIFO_UNDERRUN;
3804 int pipe;
3805 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3806 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3807 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3808
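/* Skip pipes whose power well is currently off; their IRQ registers are
 * presumably (re)initialized when the power well is brought back up.
 */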
3809 for_each_pipe(dev_priv, pipe)
3810 if (intel_display_power_enabled(dev_priv,
3811 POWER_DOMAIN_PIPE(pipe)))
3812 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3813 dev_priv->de_irq_mask[pipe],
3814 de_pipe_enables);
3815
3816 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~GEN8_AUX_CHANNEL_A, GEN8_AUX_CHANNEL_A);
3817 }
3818
3819 static int gen8_irq_postinstall(struct drm_device *dev)
3820 {
3821 struct drm_i915_private *dev_priv = dev->dev_private;
3822
3823 ibx_irq_pre_postinstall(dev);
3824
3825 gen8_gt_irq_postinstall(dev_priv);
3826 gen8_de_irq_postinstall(dev_priv);
3827
3828 ibx_irq_postinstall(dev);
3829
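/* The master interrupt control bit is set only once the GT/DE/PCH units
 * above are fully programmed, following the usual enable-the-gate-last
 * ordering.
 */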
3830 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
3831 POSTING_READ(GEN8_MASTER_IRQ);
3832
3833 return 0;
3834 }
3835
3836 static int cherryview_irq_postinstall(struct drm_device *dev)
3837 {
3838 struct drm_i915_private *dev_priv = dev->dev_private;
3839 u32 enable_mask = I915_DISPLAY_PORT_INTERRUPT |
3840 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3841 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3842 I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3843 u32 pipestat_enable = PLANE_FLIP_DONE_INT_STATUS_VLV |
3844 PIPE_CRC_DONE_INTERRUPT_STATUS;
3845 int pipe;
3846
3847 /*
3848 * Leave vblank interrupts masked initially. The vblank enable/disable
3849 * hooks will toggle them based on usage.
3850 */
3851 dev_priv->irq_mask = ~enable_mask;
3852
3853 for_each_pipe(dev_priv, pipe)
3854 I915_WRITE(PIPESTAT(pipe), 0xffff);
3855
3856 spin_lock_irq(&dev_priv->irq_lock);
3857 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3858 for_each_pipe(dev_priv, pipe)
3859 i915_enable_pipestat(dev_priv, pipe, pipestat_enable);
3860 spin_unlock_irq(&dev_priv->irq_lock);
3861
3862 I915_WRITE(VLV_IIR, 0xffffffff);
3863 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3864 I915_WRITE(VLV_IER, enable_mask);
3865
3866 gen8_gt_irq_postinstall(dev_priv);
3867
3868 I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
3869 POSTING_READ(GEN8_MASTER_IRQ);
3870
3871 return 0;
3872 }
3873
3874 static void gen8_irq_uninstall(struct drm_device *dev)
3875 {
3876 struct drm_i915_private *dev_priv = dev->dev_private;
3877
3878 if (!dev_priv)
3879 return;
3880
3881 gen8_irq_reset(dev);
3882 }
3883
3884 static void valleyview_irq_uninstall(struct drm_device *dev)
3885 {
3886 struct drm_i915_private *dev_priv = dev->dev_private;
3887 int pipe;
3888
3889 if (!dev_priv)
3890 return;
3891
3892 I915_WRITE(VLV_MASTER_IER, 0);
3893
3894 for_each_pipe(dev_priv, pipe)
3895 I915_WRITE(PIPESTAT(pipe), 0xffff);
3896
3897 I915_WRITE(HWSTAM, 0xffffffff);
3898 I915_WRITE(PORT_HOTPLUG_EN, 0);
3899 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3900
3901 /* Interrupt setup is already guaranteed to be single-threaded, this is
3902 * just to make the assert_spin_locked check happy. */
3903 spin_lock_irq(&dev_priv->irq_lock);
3904 if (dev_priv->display_irqs_enabled)
3905 valleyview_display_irqs_uninstall(dev_priv);
3906 spin_unlock_irq(&dev_priv->irq_lock);
3907
3908 dev_priv->irq_mask = 0;
3909
3910 I915_WRITE(VLV_IIR, 0xffffffff);
3911 I915_WRITE(VLV_IMR, 0xffffffff);
3912 I915_WRITE(VLV_IER, 0x0);
3913 POSTING_READ(VLV_IER);
3914 }
3915
3916 static void cherryview_irq_uninstall(struct drm_device *dev)
3917 {
3918 struct drm_i915_private *dev_priv = dev->dev_private;
3919 int pipe;
3920
3921 if (!dev_priv)
3922 return;
3923
3924 I915_WRITE(GEN8_MASTER_IRQ, 0);
3925 POSTING_READ(GEN8_MASTER_IRQ);
3926
3927 #define GEN8_IRQ_FINI_NDX(type, which) \
3928 do { \
3929 I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
3930 I915_WRITE(GEN8_##type##_IER(which), 0); \
3931 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
3932 POSTING_READ(GEN8_##type##_IIR(which)); \
3933 I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
3934 } while (0)
3935
3936 #define GEN8_IRQ_FINI(type) \
3937 do { \
3938 I915_WRITE(GEN8_##type##_IMR, 0xffffffff); \
3939 I915_WRITE(GEN8_##type##_IER, 0); \
3940 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
3941 POSTING_READ(GEN8_##type##_IIR); \
3942 I915_WRITE(GEN8_##type##_IIR, 0xffffffff); \
3943 } while (0)
3944
3945 GEN8_IRQ_FINI_NDX(GT, 0);
3946 GEN8_IRQ_FINI_NDX(GT, 1);
3947 GEN8_IRQ_FINI_NDX(GT, 2);
3948 GEN8_IRQ_FINI_NDX(GT, 3);
3949
3950 GEN8_IRQ_FINI(PCU);
3951
3952 #undef GEN8_IRQ_FINI
3953 #undef GEN8_IRQ_FINI_NDX
3954
3955 I915_WRITE(PORT_HOTPLUG_EN, 0);
3956 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3957
3958 for_each_pipe(dev_priv, pipe)
3959 I915_WRITE(PIPESTAT(pipe), 0xffff);
3960
3961 I915_WRITE(VLV_IMR, 0xffffffff);
3962 I915_WRITE(VLV_IER, 0x0);
3963 I915_WRITE(VLV_IIR, 0xffffffff);
3964 POSTING_READ(VLV_IIR);
3965 }
3966
3967 static void ironlake_irq_uninstall(struct drm_device *dev)
3968 {
3969 struct drm_i915_private *dev_priv = dev->dev_private;
3970
3971 if (!dev_priv)
3972 return;
3973
3974 ironlake_irq_reset(dev);
3975 }
3976
3977 static void i8xx_irq_preinstall(struct drm_device * dev)
3978 {
3979 struct drm_i915_private *dev_priv = dev->dev_private;
3980 int pipe;
3981
3982 for_each_pipe(dev_priv, pipe)
3983 I915_WRITE(PIPESTAT(pipe), 0);
3984 I915_WRITE16(IMR, 0xffff);
3985 I915_WRITE16(IER, 0x0);
3986 POSTING_READ16(IER);
3987 }
3988
3989 static int i8xx_irq_postinstall(struct drm_device *dev)
3990 {
3991 struct drm_i915_private *dev_priv = dev->dev_private;
3992
3993 I915_WRITE16(EMR,
3994 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3995
3996 /* Unmask the interrupts that we always want on. */
3997 dev_priv->irq_mask =
3998 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3999 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4000 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4001 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4002 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4003 I915_WRITE16(IMR, dev_priv->irq_mask);
4004
4005 I915_WRITE16(IER,
4006 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4007 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4008 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
4009 I915_USER_INTERRUPT);
4010 POSTING_READ16(IER);
4011
4012 /* Interrupt setup is already guaranteed to be single-threaded, this is
4013 * just to make the assert_spin_locked check happy. */
4014 spin_lock_irq(&dev_priv->irq_lock);
4015 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4016 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4017 spin_unlock_irq(&dev_priv->irq_lock);
4018
4019 return 0;
4020 }
4021
4022 /*
4023 * Returns true when a page flip has completed.
4024 */
4025 static bool i8xx_handle_vblank(struct drm_device *dev,
4026 int plane, int pipe, u32 iir)
4027 {
4028 struct drm_i915_private *dev_priv = dev->dev_private;
4029 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
4030
4031 if (!intel_pipe_handle_vblank(dev, pipe))
4032 return false;
4033
4034 if ((iir & flip_pending) == 0)
4035 goto check_page_flip;
4036
4037 intel_prepare_page_flip(dev, plane);
4038
4039 /* We detect FlipDone by looking for the change in PendingFlip from '1'
4040 * to '0' on the following vblank, i.e. IIR has the PendingFlip
4041 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
4042 * the flip is completed (no longer pending). Since this doesn't raise
4043 * an interrupt per se, we watch for the change at vblank.
4044 */
4045 if (I915_READ16(ISR) & flip_pending)
4046 goto check_page_flip;
4047
4048 intel_finish_page_flip(dev, pipe);
4049 return true;
4050
4051 check_page_flip:
4052 intel_check_page_flip(dev, pipe);
4053 return false;
4054 }
4055
4056 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
4057 {
4058 struct drm_device *dev = arg;
4059 struct drm_i915_private *dev_priv = dev->dev_private;
4060 u16 iir, new_iir;
4061 u32 pipe_stats[2];
4062 int pipe;
4063 u16 flip_mask =
4064 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4065 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4066
4067 iir = I915_READ16(IIR);
4068 if (iir == 0)
4069 return IRQ_NONE;
4070
4071 while (iir & ~flip_mask) {
4072 /* Can't rely on pipestat interrupt bit in iir as it might
4073 * have been cleared after the pipestat interrupt was received.
4074 * It doesn't set the bit in iir again, but it still produces
4075 * interrupts (for non-MSI).
4076 */
4077 spin_lock(&dev_priv->irq_lock);
4078 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4079 i915_handle_error(dev, false,
4080 "Command parser error, iir 0x%08x",
4081 iir);
4082
4083 for_each_pipe(dev_priv, pipe) {
4084 int reg = PIPESTAT(pipe);
4085 pipe_stats[pipe] = I915_READ(reg);
4086
4087 /*
4088 * Clear the PIPE*STAT regs before the IIR
4089 */
4090 if (pipe_stats[pipe] & 0x8000ffff)
4091 I915_WRITE(reg, pipe_stats[pipe]);
4092 }
4093 spin_unlock(&dev_priv->irq_lock);
4094
4095 I915_WRITE16(IIR, iir & ~flip_mask);
4096 new_iir = I915_READ16(IIR); /* Flush posted writes */
4097
4098 i915_update_dri1_breadcrumb(dev);
4099
4100 if (iir & I915_USER_INTERRUPT)
4101 notify_ring(dev, &dev_priv->ring[RCS]);
4102
4103 for_each_pipe(dev_priv, pipe) {
4104 int plane = pipe;
4105 if (HAS_FBC(dev))
4106 plane = !plane;
4107
4108 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4109 i8xx_handle_vblank(dev, plane, pipe, iir))
4110 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4111
4112 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4113 i9xx_pipe_crc_irq_handler(dev, pipe);
4114
4115 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
4116 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
4117 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
4118 }
4119
4120 iir = new_iir;
4121 }
4122
4123 return IRQ_HANDLED;
4124 }
4125
4126 static void i8xx_irq_uninstall(struct drm_device * dev)
4127 {
4128 struct drm_i915_private *dev_priv = dev->dev_private;
4129 int pipe;
4130
4131 for_each_pipe(dev_priv, pipe) {
4132 /* Clear enable bits; then clear status bits */
4133 I915_WRITE(PIPESTAT(pipe), 0);
4134 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4135 }
4136 I915_WRITE16(IMR, 0xffff);
4137 I915_WRITE16(IER, 0x0);
4138 I915_WRITE16(IIR, I915_READ16(IIR));
4139 }
4140
4141 static void i915_irq_preinstall(struct drm_device * dev)
4142 {
4143 struct drm_i915_private *dev_priv = dev->dev_private;
4144 int pipe;
4145
4146 if (I915_HAS_HOTPLUG(dev)) {
4147 I915_WRITE(PORT_HOTPLUG_EN, 0);
4148 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4149 }
4150
4151 I915_WRITE16(HWSTAM, 0xeffe);
4152 for_each_pipe(dev_priv, pipe)
4153 I915_WRITE(PIPESTAT(pipe), 0);
4154 I915_WRITE(IMR, 0xffffffff);
4155 I915_WRITE(IER, 0x0);
4156 POSTING_READ(IER);
4157 }
4158
4159 static int i915_irq_postinstall(struct drm_device *dev)
4160 {
4161 struct drm_i915_private *dev_priv = dev->dev_private;
4162 u32 enable_mask;
4163
4164 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
4165
4166 /* Unmask the interrupts that we always want on. */
4167 dev_priv->irq_mask =
4168 ~(I915_ASLE_INTERRUPT |
4169 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4170 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4171 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4172 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4173 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4174
4175 enable_mask =
4176 I915_ASLE_INTERRUPT |
4177 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4178 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4179 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
4180 I915_USER_INTERRUPT;
4181
4182 if (I915_HAS_HOTPLUG(dev)) {
4183 I915_WRITE(PORT_HOTPLUG_EN, 0);
4184 POSTING_READ(PORT_HOTPLUG_EN);
4185
4186 /* Enable in IER... */
4187 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
4188 /* and unmask in IMR */
4189 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
4190 }
4191
4192 I915_WRITE(IMR, dev_priv->irq_mask);
4193 I915_WRITE(IER, enable_mask);
4194 POSTING_READ(IER);
4195
4196 i915_enable_asle_pipestat(dev);
4197
4198 /* Interrupt setup is already guaranteed to be single-threaded, this is
4199 * just to make the assert_spin_locked check happy. */
4200 spin_lock_irq(&dev_priv->irq_lock);
4201 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4202 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4203 spin_unlock_irq(&dev_priv->irq_lock);
4204
4205 return 0;
4206 }
4207
4208 /*
4209 * Returns true when a page flip has completed.
4210 */
4211 static bool i915_handle_vblank(struct drm_device *dev,
4212 int plane, int pipe, u32 iir)
4213 {
4214 struct drm_i915_private *dev_priv = dev->dev_private;
4215 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
4216
4217 if (!intel_pipe_handle_vblank(dev, pipe))
4218 return false;
4219
4220 if ((iir & flip_pending) == 0)
4221 goto check_page_flip;
4222
4223 intel_prepare_page_flip(dev, plane);
4224
4225 /* We detect FlipDone by looking for the change in PendingFlip from '1'
4226 * to '0' on the following vblank, i.e. IIR has the PendingFlip
4227 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
4228 * the flip is completed (no longer pending). Since this doesn't raise
4229 * an interrupt per se, we watch for the change at vblank.
4230 */
4231 if (I915_READ(ISR) & flip_pending)
4232 goto check_page_flip;
4233
4234 intel_finish_page_flip(dev, pipe);
4235 return true;
4236
4237 check_page_flip:
4238 intel_check_page_flip(dev, pipe);
4239 return false;
4240 }
4241
4242 static irqreturn_t i915_irq_handler(int irq, void *arg)
4243 {
4244 struct drm_device *dev = arg;
4245 struct drm_i915_private *dev_priv = dev->dev_private;
4246 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
4247 u32 flip_mask =
4248 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4249 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4250 int pipe, ret = IRQ_NONE;
4251
4252 iir = I915_READ(IIR);
4253 do {
4254 bool irq_received = (iir & ~flip_mask) != 0;
4255 bool blc_event = false;
4256
4257 /* Can't rely on pipestat interrupt bit in iir as it might
4258 * have been cleared after the pipestat interrupt was received.
4259 * It doesn't set the bit in iir again, but it still produces
4260 * interrupts (for non-MSI).
4261 */
4262 spin_lock(&dev_priv->irq_lock);
4263 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4264 i915_handle_error(dev, false,
4265 "Command parser error, iir 0x%08x",
4266 iir);
4267
4268 for_each_pipe(dev_priv, pipe) {
4269 int reg = PIPESTAT(pipe);
4270 pipe_stats[pipe] = I915_READ(reg);
4271
4272 /* Clear the PIPE*STAT regs before the IIR */
4273 if (pipe_stats[pipe] & 0x8000ffff) {
4274 I915_WRITE(reg, pipe_stats[pipe]);
4275 irq_received = true;
4276 }
4277 }
4278 spin_unlock(&dev_priv->irq_lock);
4279
4280 if (!irq_received)
4281 break;
4282
4283 /* Consume port. Then clear IIR or we'll miss events */
4284 if (I915_HAS_HOTPLUG(dev) &&
4285 iir & I915_DISPLAY_PORT_INTERRUPT)
4286 i9xx_hpd_irq_handler(dev);
4287
4288 I915_WRITE(IIR, iir & ~flip_mask);
4289 new_iir = I915_READ(IIR); /* Flush posted writes */
4290
4291 if (iir & I915_USER_INTERRUPT)
4292 notify_ring(dev, &dev_priv->ring[RCS]);
4293
4294 for_each_pipe(dev_priv, pipe) {
4295 int plane = pipe;
4296 if (HAS_FBC(dev))
4297 plane = !plane;
4298
4299 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4300 i915_handle_vblank(dev, plane, pipe, iir))
4301 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4302
4303 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4304 blc_event = true;
4305
4306 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4307 i9xx_pipe_crc_irq_handler(dev, pipe);
4308
4309 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
4310 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
4311 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
4312 }
4313
4314 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4315 intel_opregion_asle_intr(dev);
4316
4317 /* With MSI, interrupts are only generated when iir
4318 * transitions from zero to nonzero. If another bit got
4319 * set while we were handling the existing iir bits, then
4320 * we would never get another interrupt.
4321 *
4322 * This is fine on non-MSI as well, as if we hit this path
4323 * we avoid exiting the interrupt handler only to generate
4324 * another one.
4325 *
4326 * Note that for MSI this could cause a stray interrupt report
4327 * if an interrupt landed in the time between writing IIR and
4328 * the posting read. This should be rare enough to never
4329 * trigger the 99% of 100,000 interrupts test for disabling
4330 * stray interrupts.
4331 */
4332 ret = IRQ_HANDLED;
4333 iir = new_iir;
4334 } while (iir & ~flip_mask);
4335
4336 i915_update_dri1_breadcrumb(dev);
4337
4338 return ret;
4339 }
4340
4341 static void i915_irq_uninstall(struct drm_device * dev)
4342 {
4343 struct drm_i915_private *dev_priv = dev->dev_private;
4344 int pipe;
4345
4346 if (I915_HAS_HOTPLUG(dev)) {
4347 I915_WRITE(PORT_HOTPLUG_EN, 0);
4348 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4349 }
4350
4351 I915_WRITE16(HWSTAM, 0xffff);
4352 for_each_pipe(dev_priv, pipe) {
4353 /* Clear enable bits; then clear status bits */
4354 I915_WRITE(PIPESTAT(pipe), 0);
4355 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4356 }
4357 I915_WRITE(IMR, 0xffffffff);
4358 I915_WRITE(IER, 0x0);
4359
4360 I915_WRITE(IIR, I915_READ(IIR));
4361 }
4362
4363 static void i965_irq_preinstall(struct drm_device * dev)
4364 {
4365 struct drm_i915_private *dev_priv = dev->dev_private;
4366 int pipe;
4367
4368 I915_WRITE(PORT_HOTPLUG_EN, 0);
4369 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4370
4371 I915_WRITE(HWSTAM, 0xeffe);
4372 for_each_pipe(dev_priv, pipe)
4373 I915_WRITE(PIPESTAT(pipe), 0);
4374 I915_WRITE(IMR, 0xffffffff);
4375 I915_WRITE(IER, 0x0);
4376 POSTING_READ(IER);
4377 }
4378
4379 static int i965_irq_postinstall(struct drm_device *dev)
4380 {
4381 struct drm_i915_private *dev_priv = dev->dev_private;
4382 u32 enable_mask;
4383 u32 error_mask;
4384
4385 /* Unmask the interrupts that we always want on. */
4386 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
4387 I915_DISPLAY_PORT_INTERRUPT |
4388 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4389 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4390 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4391 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4392 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4393
4394 enable_mask = ~dev_priv->irq_mask;
4395 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4396 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
4397 enable_mask |= I915_USER_INTERRUPT;
4398
4399 if (IS_G4X(dev))
4400 enable_mask |= I915_BSD_USER_INTERRUPT;
4401
4402 /* Interrupt setup is already guaranteed to be single-threaded, this is
4403 * just to make the assert_spin_locked check happy. */
4404 spin_lock_irq(&dev_priv->irq_lock);
4405 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4406 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4407 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4408 spin_unlock_irq(&dev_priv->irq_lock);
4409
4410 /*
4411 * Enable some error detection, note the instruction error mask
4412 * bit is reserved, so we leave it masked.
4413 */
4414 if (IS_G4X(dev)) {
4415 error_mask = ~(GM45_ERROR_PAGE_TABLE |
4416 GM45_ERROR_MEM_PRIV |
4417 GM45_ERROR_CP_PRIV |
4418 I915_ERROR_MEMORY_REFRESH);
4419 } else {
4420 error_mask = ~(I915_ERROR_PAGE_TABLE |
4421 I915_ERROR_MEMORY_REFRESH);
4422 }
4423 I915_WRITE(EMR, error_mask);
4424
4425 I915_WRITE(IMR, dev_priv->irq_mask);
4426 I915_WRITE(IER, enable_mask);
4427 POSTING_READ(IER);
4428
4429 I915_WRITE(PORT_HOTPLUG_EN, 0);
4430 POSTING_READ(PORT_HOTPLUG_EN);
4431
4432 i915_enable_asle_pipestat(dev);
4433
4434 return 0;
4435 }
4436
4437 static void i915_hpd_irq_setup(struct drm_device *dev)
4438 {
4439 struct drm_i915_private *dev_priv = dev->dev_private;
4440 struct intel_encoder *intel_encoder;
4441 u32 hotplug_en;
4442
4443 assert_spin_locked(&dev_priv->irq_lock);
4444
4445 if (I915_HAS_HOTPLUG(dev)) {
4446 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
4447 hotplug_en &= ~HOTPLUG_INT_EN_MASK;
4448 /* Note HDMI and DP share hotplug bits */
4449 /* enable bits are the same for all generations */
4450 for_each_intel_encoder(dev, intel_encoder)
4451 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
4452 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
4453 /* Programming the CRT detection parameters tends
4454 * to generate a spurious hotplug event about three
4455 * seconds later. So just do it once.
4456 */
4457 if (IS_G4X(dev))
4458 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4459 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
4460 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4461
4462 /* Ignore TV since it's buggy */
4463 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
4464 }
4465 }
4466
4467 static irqreturn_t i965_irq_handler(int irq, void *arg)
4468 {
4469 struct drm_device *dev = arg;
4470 struct drm_i915_private *dev_priv = dev->dev_private;
4471 u32 iir, new_iir;
4472 u32 pipe_stats[I915_MAX_PIPES];
4473 int ret = IRQ_NONE, pipe;
4474 u32 flip_mask =
4475 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4476 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4477
4478 iir = I915_READ(IIR);
4479
4480 for (;;) {
4481 bool irq_received = (iir & ~flip_mask) != 0;
4482 bool blc_event = false;
4483
4484 /* Can't rely on pipestat interrupt bit in iir as it might
4485 * have been cleared after the pipestat interrupt was received.
4486 * It doesn't set the bit in iir again, but it still produces
4487 * interrupts (for non-MSI).
4488 */
4489 spin_lock(&dev_priv->irq_lock);
4490 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4491 i915_handle_error(dev, false,
4492 "Command parser error, iir 0x%08x",
4493 iir);
4494
4495 for_each_pipe(dev_priv, pipe) {
4496 int reg = PIPESTAT(pipe);
4497 pipe_stats[pipe] = I915_READ(reg);
4498
4499 /*
4500 * Clear the PIPE*STAT regs before the IIR
4501 */
4502 if (pipe_stats[pipe] & 0x8000ffff) {
4503 I915_WRITE(reg, pipe_stats[pipe]);
4504 irq_received = true;
4505 }
4506 }
4507 spin_unlock(&dev_priv->irq_lock);
4508
4509 if (!irq_received)
4510 break;
4511
4512 ret = IRQ_HANDLED;
4513
4514 /* Consume port. Then clear IIR or we'll miss events */
4515 if (iir & I915_DISPLAY_PORT_INTERRUPT)
4516 i9xx_hpd_irq_handler(dev);
4517
4518 I915_WRITE(IIR, iir & ~flip_mask);
4519 new_iir = I915_READ(IIR); /* Flush posted writes */
4520
4521 if (iir & I915_USER_INTERRUPT)
4522 notify_ring(dev, &dev_priv->ring[RCS]);
4523 if (iir & I915_BSD_USER_INTERRUPT)
4524 notify_ring(dev, &dev_priv->ring[VCS]);
4525
4526 for_each_pipe(dev_priv, pipe) {
4527 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
4528 i915_handle_vblank(dev, pipe, pipe, iir))
4529 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
4530
4531 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4532 blc_event = true;
4533
4534 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4535 i9xx_pipe_crc_irq_handler(dev, pipe);
4536
4537 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS &&
4538 intel_set_cpu_fifo_underrun_reporting(dev, pipe, false))
4539 DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
4540 }
4541
4542 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4543 intel_opregion_asle_intr(dev);
4544
4545 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
4546 gmbus_irq_handler(dev);
4547
4548 /* With MSI, interrupts are only generated when iir
4549 * transitions from zero to nonzero. If another bit got
4550 * set while we were handling the existing iir bits, then
4551 * we would never get another interrupt.
4552 *
4553 * This is fine on non-MSI as well, as if we hit this path
4554 * we avoid exiting the interrupt handler only to generate
4555 * another one.
4556 *
4557 * Note that for MSI this could cause a stray interrupt report
4558 * if an interrupt landed in the time between writing IIR and
4559 * the posting read. This should be rare enough to never
4560 * trigger the 99% of 100,000 interrupts test for disabling
4561 * stray interrupts.
4562 */
4563 iir = new_iir;
4564 }
4565
4566 i915_update_dri1_breadcrumb(dev);
4567
4568 return ret;
4569 }
4570
4571 static void i965_irq_uninstall(struct drm_device * dev)
4572 {
4573 struct drm_i915_private *dev_priv = dev->dev_private;
4574 int pipe;
4575
4576 if (!dev_priv)
4577 return;
4578
4579 I915_WRITE(PORT_HOTPLUG_EN, 0);
4580 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4581
4582 I915_WRITE(HWSTAM, 0xffffffff);
4583 for_each_pipe(dev_priv, pipe)
4584 I915_WRITE(PIPESTAT(pipe), 0);
4585 I915_WRITE(IMR, 0xffffffff);
4586 I915_WRITE(IER, 0x0);
4587
4588 for_each_pipe(dev_priv, pipe)
4589 I915_WRITE(PIPESTAT(pipe),
4590 I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
4591 I915_WRITE(IIR, I915_READ(IIR));
4592 }
4593
4594 static void intel_hpd_irq_reenable_work(struct work_struct *work)
4595 {
4596 struct drm_i915_private *dev_priv =
4597 container_of(work, typeof(*dev_priv),
4598 hotplug_reenable_work.work);
4599 struct drm_device *dev = dev_priv->dev;
4600 struct drm_mode_config *mode_config = &dev->mode_config;
4601 int i;
4602
4603 intel_runtime_pm_get(dev_priv);
4604
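/* Re-arm any HPD pin that was marked HPD_DISABLED (presumably by the
 * interrupt storm detection elsewhere in this file) and put its
 * connectors back on HPD-based polling.
 */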
4605 spin_lock_irq(&dev_priv->irq_lock);
4606 for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
4607 struct drm_connector *connector;
4608
4609 if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
4610 continue;
4611
4612 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
4613
4614 list_for_each_entry(connector, &mode_config->connector_list, head) {
4615 struct intel_connector *intel_connector = to_intel_connector(connector);
4616
4617 if (intel_connector->encoder->hpd_pin == i) {
4618 if (connector->polled != intel_connector->polled)
4619 DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
4620 connector->name);
4621 connector->polled = intel_connector->polled;
4622 if (!connector->polled)
4623 connector->polled = DRM_CONNECTOR_POLL_HPD;
4624 }
4625 }
4626 }
4627 if (dev_priv->display.hpd_irq_setup)
4628 dev_priv->display.hpd_irq_setup(dev);
4629 spin_unlock_irq(&dev_priv->irq_lock);
4630
4631 intel_runtime_pm_put(dev_priv);
4632 }
4633
4634 void intel_irq_init(struct drm_device *dev)
4635 {
4636 struct drm_i915_private *dev_priv = dev->dev_private;
4637
4638 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
4639 INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
4640 INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
4641 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
4642 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4643
4644 /* Let's track the enabled rps events */
4645 if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev))
4646 /* WaGsvRC0ResidencyMethod:vlv */
4647 dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
4648 else
4649 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4650
4651 setup_timer(&dev_priv->gpu_error.hangcheck_timer,
4652 i915_hangcheck_elapsed,
4653 (unsigned long) dev);
4654 INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
4655 intel_hpd_irq_reenable_work);
4656
4657 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
4658
4659 /* Haven't installed the IRQ handler yet */
4660 dev_priv->pm._irqs_disabled = true;
4661
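/* Gen2 has no hardware frame counter (see the vblank comment further
 * down), so max_vblank_count stays 0 and vblank counting relies purely
 * on interrupts.
 */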
4662 if (IS_GEN2(dev)) {
4663 dev->max_vblank_count = 0;
4664 dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
4665 } else if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
4666 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4667 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
4668 } else {
4669 dev->driver->get_vblank_counter = i915_get_vblank_counter;
4670 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
4671 }
4672
4673 /*
4674 * Opt out of the vblank disable timer on everything except gen2.
4675 * Gen2 doesn't have a hardware frame counter and so depends on
4676 * vblank interrupts to produce sane vblank sequence numbers.
4677 */
4678 if (!IS_GEN2(dev))
4679 dev->vblank_disable_immediate = true;
4680
4681 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
4682 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
4683 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4684 }
4685
4686 if (IS_CHERRYVIEW(dev)) {
4687 dev->driver->irq_handler = cherryview_irq_handler;
4688 dev->driver->irq_preinstall = cherryview_irq_preinstall;
4689 dev->driver->irq_postinstall = cherryview_irq_postinstall;
4690 dev->driver->irq_uninstall = cherryview_irq_uninstall;
4691 dev->driver->enable_vblank = valleyview_enable_vblank;
4692 dev->driver->disable_vblank = valleyview_disable_vblank;
4693 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4694 } else if (IS_VALLEYVIEW(dev)) {
4695 dev->driver->irq_handler = valleyview_irq_handler;
4696 dev->driver->irq_preinstall = valleyview_irq_preinstall;
4697 dev->driver->irq_postinstall = valleyview_irq_postinstall;
4698 dev->driver->irq_uninstall = valleyview_irq_uninstall;
4699 dev->driver->enable_vblank = valleyview_enable_vblank;
4700 dev->driver->disable_vblank = valleyview_disable_vblank;
4701 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4702 } else if (IS_GEN8(dev)) {
4703 dev->driver->irq_handler = gen8_irq_handler;
4704 dev->driver->irq_preinstall = gen8_irq_reset;
4705 dev->driver->irq_postinstall = gen8_irq_postinstall;
4706 dev->driver->irq_uninstall = gen8_irq_uninstall;
4707 dev->driver->enable_vblank = gen8_enable_vblank;
4708 dev->driver->disable_vblank = gen8_disable_vblank;
4709 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
4710 } else if (HAS_PCH_SPLIT(dev)) {
4711 dev->driver->irq_handler = ironlake_irq_handler;
4712 dev->driver->irq_preinstall = ironlake_irq_reset;
4713 dev->driver->irq_postinstall = ironlake_irq_postinstall;
4714 dev->driver->irq_uninstall = ironlake_irq_uninstall;
4715 dev->driver->enable_vblank = ironlake_enable_vblank;
4716 dev->driver->disable_vblank = ironlake_disable_vblank;
4717 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
4718 } else {
4719 if (INTEL_INFO(dev)->gen == 2) {
4720 dev->driver->irq_preinstall = i8xx_irq_preinstall;
4721 dev->driver->irq_postinstall = i8xx_irq_postinstall;
4722 dev->driver->irq_handler = i8xx_irq_handler;
4723 dev->driver->irq_uninstall = i8xx_irq_uninstall;
4724 } else if (INTEL_INFO(dev)->gen == 3) {
4725 dev->driver->irq_preinstall = i915_irq_preinstall;
4726 dev->driver->irq_postinstall = i915_irq_postinstall;
4727 dev->driver->irq_uninstall = i915_irq_uninstall;
4728 dev->driver->irq_handler = i915_irq_handler;
4729 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4730 } else {
4731 dev->driver->irq_preinstall = i965_irq_preinstall;
4732 dev->driver->irq_postinstall = i965_irq_postinstall;
4733 dev->driver->irq_uninstall = i965_irq_uninstall;
4734 dev->driver->irq_handler = i965_irq_handler;
4735 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4736 }
4737 dev->driver->enable_vblank = i915_enable_vblank;
4738 dev->driver->disable_vblank = i915_disable_vblank;
4739 }
4740 }
4741
4742 void intel_hpd_init(struct drm_device *dev)
4743 {
4744 struct drm_i915_private *dev_priv = dev->dev_private;
4745 struct drm_mode_config *mode_config = &dev->mode_config;
4746 struct drm_connector *connector;
4747 int i;
4748
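/* Pin 0 is presumably HPD_NONE (cf. the HPD_NONE + 1 loop in the
 * re-enable work above), so start at 1.
 */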
4749 for (i = 1; i < HPD_NUM_PINS; i++) {
4750 dev_priv->hpd_stats[i].hpd_cnt = 0;
4751 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
4752 }
4753 list_for_each_entry(connector, &mode_config->connector_list, head) {
4754 struct intel_connector *intel_connector = to_intel_connector(connector);
4755 connector->polled = intel_connector->polled;
4756 if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
4757 connector->polled = DRM_CONNECTOR_POLL_HPD;
4758 if (intel_connector->mst_port)
4759 connector->polled = DRM_CONNECTOR_POLL_HPD;
4760 }
4761
4762 /* Interrupt setup is already guaranteed to be single-threaded, this is
4763 * just to make the assert_spin_locked checks happy. */
4764 spin_lock_irq(&dev_priv->irq_lock);
4765 if (dev_priv->display.hpd_irq_setup)
4766 dev_priv->display.hpd_irq_setup(dev);
4767 spin_unlock_irq(&dev_priv->irq_lock);
4768 }
4769
4770 /* Disable interrupts so we can allow runtime PM. */
4771 void intel_runtime_pm_disable_interrupts(struct drm_device *dev)
4772 {
4773 struct drm_i915_private *dev_priv = dev->dev_private;
4774
4775 dev->driver->irq_uninstall(dev);
4776 dev_priv->pm._irqs_disabled = true;
4777 }
4778
4779 /* Restore interrupts so we can recover from runtime PM. */
4780 void intel_runtime_pm_restore_interrupts(struct drm_device *dev)
4781 {
4782 struct drm_i915_private *dev_priv = dev->dev_private;
4783
4784 dev_priv->pm._irqs_disabled = false;
4785 dev->driver->irq_preinstall(dev);
4786 dev->driver->irq_postinstall(dev);
4787 }