/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling
 * interrupt handling. There's a lot more functionality in i915_irq.c and
 * related files, but that will be described in separate chapters.
 */

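/*
 * The hpd_* tables below map each hotplug pin (HPD_*) to the
 * platform-specific interrupt bit for that pin, so the shared
 * detection code can stay platform-agnostic.
 */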
static const u32 hpd_ilk[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG,
};

static const u32 hpd_ivb[HPD_NUM_PINS] = {
	[HPD_PORT_A] = DE_DP_A_HOTPLUG_IVB,
};

static const u32 hpd_bdw[HPD_NUM_PINS] = {
	[HPD_PORT_A] = GEN8_PORT_DP_A_HOTPLUG,
};

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_spt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = SDE_PORTA_HOTPLUG_SPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
	[HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
	[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
	[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
	[HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
};

/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

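/* Same double IIR clear as above: IIR can queue up two events. */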
#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
static void gen5_assert_iir_is_zero(struct drm_i915_private *dev_priv,
				    i915_reg_t reg)
{
	u32 val = I915_READ(reg);

	if (val == 0)
		return;

	WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
	     i915_mmio_reg_offset(reg), val);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
	I915_WRITE(reg, 0xffffffff);
	POSTING_READ(reg);
}

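/*
 * Init ordering below: assert IIR is already clear (stale events would
 * otherwise be lost behind the mask), enable in IER while everything is
 * still masked, and only then unmask in IMR.
 */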
#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	gen5_assert_iir_is_zero(dev_priv, type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)

static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
static inline void
i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
				     uint32_t mask,
				     uint32_t bits)
{
	uint32_t val;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(bits & ~mask);

	val = I915_READ(PORT_HOTPLUG_EN);
	val &= ~mask;
	val |= bits;
	I915_WRITE(PORT_HOTPLUG_EN, val);
}

/**
 * i915_hotplug_interrupt_update - update hotplug interrupt enable
 * @dev_priv: driver private
 * @mask: bits to update
 * @bits: bits to enable
 * NOTE: the HPD enable bits are modified both inside and outside
 * of an interrupt context. To avoid read-modify-write cycles
 * interfering, these bits are protected by a spinlock. Since this
 * function is usually not called from a context where the lock is
 * held already, this function acquires the lock itself. A non-locking
 * version is also available.
 */
void i915_hotplug_interrupt_update(struct drm_i915_private *dev_priv,
				   uint32_t mask,
				   uint32_t bits)
{
	spin_lock_irq(&dev_priv->irq_lock);
	i915_hotplug_interrupt_update_locked(dev_priv, mask, bits);
	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ilk_update_display_irq - update DEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ilk_update_display_irq(struct drm_i915_private *dev_priv,
			    uint32_t interrupt_mask,
			    uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->irq_mask) {
		dev_priv->irq_mask = new_val;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
	POSTING_READ_FW(GTIMR);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

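/*
 * From gen8 onwards the PM interrupts live in GT interrupt register
 * bank 2; these helpers select the right IIR/IMR/IER register for the
 * platform.
 */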
static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static i915_reg_t gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}

static i915_reg_t gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
				  uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_disable_pm_irq(dev_priv, mask);
}

void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv)
{
	i915_reg_t reg = gen6_pm_iir(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	POSTING_READ(reg);
	dev_priv->rps.pm_iir = 0;
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	WARN_ON_ONCE(dev_priv->rps.pm_iir);
	WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
	dev_priv->rps.interrupts_enabled = true;
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
		   dev_priv->pm_rps_events);
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}

u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
{
	return (mask & ~dev_priv->rps.pm_intr_keep);
}

void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv)
{
	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.interrupts_enabled = false;

	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));

	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
		   ~dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
	synchronize_irq(dev_priv->drm.irq);

	/* Now that we will not be generating any more work, flush any
	 * outstanding tasks. As we are called on the RPS idle path,
	 * we will reset the GPU to minimum frequencies, so the current
	 * state of the worker can be discarded.
	 */
	cancel_work_sync(&dev_priv->rps.work);
	gen6_reset_rps_interrupts(dev_priv);
}

/**
 * bdw_update_port_irq - update DE port interrupt
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
				uint32_t interrupt_mask,
				uint32_t enabled_irq_mask)
{
	uint32_t new_val;
	uint32_t old_val;

	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	old_val = I915_READ(GEN8_DE_PORT_IMR);

	new_val = old_val;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != old_val) {
		I915_WRITE(GEN8_DE_PORT_IMR, new_val);
		POSTING_READ(GEN8_DE_PORT_IMR);
	}
}

/**
 * bdw_update_pipe_irq - update DE pipe interrupt
 * @dev_priv: driver private
 * @pipe: pipe whose interrupt to update
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
			 enum pipe pipe,
			 uint32_t interrupt_mask,
			 uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	new_val = dev_priv->de_irq_mask[pipe];
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->de_irq_mask[pipe]) {
		dev_priv->de_irq_mask[pipe] = new_val;
		I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
		POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
	}
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}

static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	i915_reg_t reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

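/*
 * In PIPESTAT the enable bits sit in the high 16 bits, directly above
 * the corresponding status bits, hence the << 16 below. VLV/CHV have a
 * few bits that do not follow that pattern and are fixed up here.
 */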
static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}

void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		enable_mask = vlv_get_pipestat_enable_mask(&dev_priv->drm,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev_priv: i915 device private
 */
static void i915_enable_asle_pipestat(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->opregion.asle || !IS_MOBILE(dev_priv))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_GEN(dev_priv) >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	i915_reg_t high_frame, low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
	const struct drm_display_mode *mode = &intel_crtc->base.hwmode;

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vbl_start = mode->crtc_vblank_start;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vbl_start = DIV_ROUND_UP(vbl_start, 2);

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}

static u32 g4x_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	return I915_READ(PIPE_FRMCOUNT_G4X(pipe));
}

/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct drm_display_mode *mode = &crtc->base.hwmode;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev_priv))
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * On HSW, the DSL reg (0x70000) appears to return 0 if we
	 * read it just before the start of vblank. So try it again
	 * so we don't accidentally end up spanning a vblank frame
	 * increment, causing the pipe_update_end() code to squawk at us.
	 *
	 * The nature of this problem means we can't simply check the ISR
	 * bit and return the vblank start value; nor can we use the scanline
	 * debug register in the transcoder as it appears to have the same
	 * problem. We may need to extend this to include other platforms,
	 * but so far testing only shows the problem on HSW.
	 */
	if (HAS_DDI(dev_priv) && !position) {
		int i, temp;

		for (i = 0; i < 100; i++) {
			udelay(1);
			temp = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) &
				DSL_LINEMASK_GEN3;
			if (temp != position) {
				position = temp;
				break;
			}
		}
	}

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime,
				    const struct drm_display_mode *mode)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (WARN_ON(!mode->crtc_clock)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_IN_VBLANK flag. In order to fix
		 * that, always add htotal-hsync_start to the current pixel
		 * position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev_priv) || IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	return ret;
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %u\n", pipe);
		return -EINVAL;
	}

	if (!crtc->hwmode.crtc_clock) {
		DRM_DEBUG_KMS("crtc %u is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     &crtc->hwmode);
}

static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv)
{
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev_priv, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);
}

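/*
 * smp_store_mb() publishes irq_posted with a full barrier before the
 * wakeup, so a woken waiter is guaranteed to observe it.
 */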
static void notify_ring(struct intel_engine_cs *engine)
{
	smp_store_mb(engine->breadcrumbs.irq_posted, true);
	if (intel_engine_wakeup(engine))
		trace_i915_gem_request_notify(engine);
}

static void vlv_c0_read(struct drm_i915_private *dev_priv,
			struct intel_rps_ei *ei)
{
	ei->cz_clock = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
	ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT);
	ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT);
}

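/*
 * Compare the C0 residency accumulated between two sample points
 * against the busyness threshold (in percent, hence mul = 100). The
 * mul <<= 8 compensates for the coarser (x256) counter granularity
 * apparently selected by VLV_COUNT_RANGE_HIGH.
 */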
static bool vlv_c0_above(struct drm_i915_private *dev_priv,
			 const struct intel_rps_ei *old,
			 const struct intel_rps_ei *now,
			 int threshold)
{
	u64 time, c0;
	unsigned int mul = 100;

	if (old->cz_clock == 0)
		return false;

	if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
		mul <<= 8;

	time = now->cz_clock - old->cz_clock;
	time *= threshold * dev_priv->czclk_freq;

	/* Workload can be split between render + media, e.g. SwapBuffers
	 * being blitted in X after being rendered in mesa. To account for
	 * this we need to combine both engines into our activity counter.
	 */
	c0 = now->render_c0 - old->render_c0;
	c0 += now->media_c0 - old->media_c0;
	c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC;

	return c0 >= time;
}

void gen6_rps_reset_ei(struct drm_i915_private *dev_priv)
{
	vlv_c0_read(dev_priv, &dev_priv->rps.down_ei);
	dev_priv->rps.up_ei = dev_priv->rps.down_ei;
}

static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	struct intel_rps_ei now;
	u32 events = 0;

	if ((pm_iir & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED)) == 0)
		return 0;

	vlv_c0_read(dev_priv, &now);
	if (now.cz_clock == 0)
		return 0;

	if (pm_iir & GEN6_PM_RP_DOWN_EI_EXPIRED) {
		if (!vlv_c0_above(dev_priv,
				  &dev_priv->rps.down_ei, &now,
				  dev_priv->rps.down_threshold))
			events |= GEN6_PM_RP_DOWN_THRESHOLD;
		dev_priv->rps.down_ei = now;
	}

	if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
		if (vlv_c0_above(dev_priv,
				 &dev_priv->rps.up_ei, &now,
				 dev_priv->rps.up_threshold))
			events |= GEN6_PM_RP_UP_THRESHOLD;
		dev_priv->rps.up_ei = now;
	}

	return events;
}

static bool any_waiters(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;

	for_each_engine(engine, dev_priv)
		if (intel_engine_has_waiter(engine))
			return true;

	return false;
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	bool client_boost;
	int new_delay, adj, min, max;
	u32 pm_iir;

	spin_lock_irq(&dev_priv->irq_lock);
	/* Speed up work cancellation during disabling rps interrupts. */
	if (!dev_priv->rps.interrupts_enabled) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}

	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	client_boost = dev_priv->rps.client_boost;
	dev_priv->rps.client_boost = false;
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

	if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir);

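	/*
	 * Adjustment policy: grow the step geometrically while the same
	 * up/down event repeats, and reset it on boosts, waiters or a
	 * change of direction.
	 */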
	adj = dev_priv->rps.last_adj;
	new_delay = dev_priv->rps.cur_freq;
	min = dev_priv->rps.min_freq_softlimit;
	max = dev_priv->rps.max_freq_softlimit;
	if (client_boost || any_waiters(dev_priv))
		max = dev_priv->rps.max_freq;
	if (client_boost && new_delay < dev_priv->rps.boost_freq) {
		new_delay = dev_priv->rps.boost_freq;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1;
		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq - adj) {
			new_delay = dev_priv->rps.efficient_freq;
			adj = 0;
		}
	} else if (client_boost || any_waiters(dev_priv)) {
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else /* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1;
	} else { /* unknown event */
		adj = 0;
	}

	dev_priv->rps.last_adj = adj;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay += adj;
	new_delay = clamp_t(int, new_delay, min, max);

	intel_set_rps(dev_priv, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}

/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		i915_reg_t reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1(slice);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->drm.primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irq(&dev_priv->irq_lock);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
	spin_unlock_irq(&dev_priv->irq_lock);

	mutex_unlock(&dev_priv->drm.struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_i915_private *dev_priv,
					       u32 iir)
{
	if (!HAS_L3_DPF(dev_priv))
		return;

	spin_lock(&dev_priv->irq_lock);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev_priv);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		notify_ring(&dev_priv->engine[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(&dev_priv->engine[VCS]);
}

static void snb_gt_irq_handler(struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir & GT_RENDER_USER_INTERRUPT)
		notify_ring(&dev_priv->engine[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(&dev_priv->engine[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(&dev_priv->engine[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(dev_priv))
		ivybridge_parity_error_irq_handler(dev_priv, gt_iir);
}

static __always_inline void
gen8_cs_irq_handler(struct intel_engine_cs *engine, u32 iir, int test_shift)
{
	if (iir & (GT_RENDER_USER_INTERRUPT << test_shift))
		notify_ring(engine);
	if (iir & (GT_CONTEXT_SWITCH_INTERRUPT << test_shift))
		tasklet_schedule(&engine->irq_tasklet);
}

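/*
 * The ack step reads and clears the GT IIR banks flagged in master_ctl
 * (with the raw _FW accessors to keep the hot path cheap) and caches
 * the values, which gen8_gt_irq_handler() then consumes outside the
 * hardware-critical section.
 */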
static irqreturn_t gen8_gt_irq_ack(struct drm_i915_private *dev_priv,
				   u32 master_ctl,
				   u32 gt_iir[4])
{
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		gt_iir[0] = I915_READ_FW(GEN8_GT_IIR(0));
		if (gt_iir[0]) {
			I915_WRITE_FW(GEN8_GT_IIR(0), gt_iir[0]);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		gt_iir[1] = I915_READ_FW(GEN8_GT_IIR(1));
		if (gt_iir[1]) {
			I915_WRITE_FW(GEN8_GT_IIR(1), gt_iir[1]);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		gt_iir[3] = I915_READ_FW(GEN8_GT_IIR(3));
		if (gt_iir[3]) {
			I915_WRITE_FW(GEN8_GT_IIR(3), gt_iir[3]);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	if (master_ctl & GEN8_GT_PM_IRQ) {
		gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2));
		if (gt_iir[2] & dev_priv->pm_rps_events) {
			I915_WRITE_FW(GEN8_GT_IIR(2),
				      gt_iir[2] & dev_priv->pm_rps_events);
			ret = IRQ_HANDLED;
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}

	return ret;
}

static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv,
				u32 gt_iir[4])
{
	if (gt_iir[0]) {
		gen8_cs_irq_handler(&dev_priv->engine[RCS],
				    gt_iir[0], GEN8_RCS_IRQ_SHIFT);
		gen8_cs_irq_handler(&dev_priv->engine[BCS],
				    gt_iir[0], GEN8_BCS_IRQ_SHIFT);
	}

	if (gt_iir[1]) {
		gen8_cs_irq_handler(&dev_priv->engine[VCS],
				    gt_iir[1], GEN8_VCS1_IRQ_SHIFT);
		gen8_cs_irq_handler(&dev_priv->engine[VCS2],
				    gt_iir[1], GEN8_VCS2_IRQ_SHIFT);
	}

	if (gt_iir[3])
		gen8_cs_irq_handler(&dev_priv->engine[VECS],
				    gt_iir[3], GEN8_VECS_IRQ_SHIFT);

	if (gt_iir[2] & dev_priv->pm_rps_events)
		gen6_rps_irq_handler(dev_priv, gt_iir[2]);
}

static bool bxt_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug2_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_E:
		return val & PORTE_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool spt_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & PORTA_HOTPLUG_LONG_DETECT;
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool ilk_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_A:
		return val & DIGITAL_PORTA_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool pch_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_B:
		return val & PORTB_HOTPLUG_LONG_DETECT;
	case PORT_C:
		return val & PORTC_HOTPLUG_LONG_DETECT;
	case PORT_D:
		return val & PORTD_HOTPLUG_LONG_DETECT;
	default:
		return false;
	}
}

static bool i9xx_port_hotplug_long_detect(enum port port, u32 val)
{
	switch (port) {
	case PORT_B:
		return val & PORTB_HOTPLUG_INT_LONG_PULSE;
	case PORT_C:
		return val & PORTC_HOTPLUG_INT_LONG_PULSE;
	case PORT_D:
		return val & PORTD_HOTPLUG_INT_LONG_PULSE;
	default:
		return false;
	}
}

/*
 * Get a bit mask of pins that have triggered, and which ones may be long.
 * This can be called multiple times with the same masks to accumulate
 * hotplug detection results from several registers.
 *
 * Note that the caller is expected to zero out the masks initially.
 */
static void intel_get_hpd_pins(u32 *pin_mask, u32 *long_mask,
			       u32 hotplug_trigger, u32 dig_hotplug_reg,
			       const u32 hpd[HPD_NUM_PINS],
			       bool long_pulse_detect(enum port port, u32 val))
{
	enum port port;
	int i;

	for_each_hpd_pin(i) {
		if ((hpd[i] & hotplug_trigger) == 0)
			continue;

		*pin_mask |= BIT(i);

		if (!intel_hpd_pin_to_port(i, &port))
			continue;

		if (long_pulse_detect(port, dig_hotplug_reg))
			*long_mask |= BIT(i);
	}

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg, *pin_mask);
}

static void gmbus_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_i915_private *dev_priv)
{
	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
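/*
 * CRC entries are handed to debugfs readers through a simple circular
 * buffer; CIRC_SPACE() comes from <linux/circ_buf.h> included above.
 */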
static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
					 enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_DEBUG_KMS("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	entry->frame = dev_priv->drm.driver->get_vblank_counter(&dev_priv->drm,
								 pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
			     enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif

static void hsw_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	uint32_t res1, res2;

	if (INTEL_GEN(dev_priv) >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev_priv, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		if (dev_priv->rps.interrupts_enabled) {
			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
			schedule_work(&dev_priv->rps.work);
		}
		spin_unlock(&dev_priv->irq_lock);
	}

	if (INTEL_INFO(dev_priv)->gen >= 8)
		return;

	if (HAS_VEBOX(dev_priv)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(&dev_priv->engine[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
	}
}

static bool intel_pipe_handle_vblank(struct drm_i915_private *dev_priv,
				     enum pipe pipe)
{
	bool ret;

	ret = drm_handle_vblank(&dev_priv->drm, pipe);
	if (ret)
		intel_finish_page_flip_mmio(dev_priv, pipe);

	return ret;
}

static void valleyview_pipestat_irq_ack(struct drm_i915_private *dev_priv,
					u32 iir, u32 pipe_stats[I915_MAX_PIPES])
{
	int pipe;

	spin_lock(&dev_priv->irq_lock);

	if (!dev_priv->display_irqs_enabled) {
		spin_unlock(&dev_priv->irq_lock);
		return;
	}

	for_each_pipe(dev_priv, pipe) {
		i915_reg_t reg;
		u32 mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

		/* fifo underruns are filtered in the underrun handler. */
		mask = PIPE_FIFO_UNDERRUN_STATUS;

		switch (pipe) {
		case PIPE_A:
			iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
			break;
		case PIPE_B:
			iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
			break;
		case PIPE_C:
			iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
			break;
		}
		if (iir & iir_bit)
			mask |= dev_priv->pipestat_irq_mask[pipe];

		if (!mask)
			continue;

		reg = PIPESTAT(pipe);
		mask |= PIPESTAT_INT_ENABLE_MASK;
		pipe_stats[pipe] = I915_READ(reg) & mask;

		/*
		 * Clear the PIPE*STAT regs before the IIR
		 */
		if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
					PIPESTAT_INT_STATUS_MASK))
			I915_WRITE(reg, pipe_stats[pipe]);
	}
	spin_unlock(&dev_priv->irq_lock);
}

static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
					    u32 pipe_stats[I915_MAX_PIPES])
{
	enum pipe pipe;

	for_each_pipe(dev_priv, pipe) {
		if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
		    intel_pipe_handle_vblank(dev_priv, pipe))
			intel_check_page_flip(dev_priv, pipe);

		if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV)
			intel_finish_page_flip_cs(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
			i9xx_pipe_crc_irq_handler(dev_priv, pipe);

		if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
			intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
	}

	if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
		gmbus_irq_handler(dev_priv);
}

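/* Writing the latched status bits back to PORT_HOTPLUG_STAT clears them. */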
1696 static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
1697 {
1698 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
1699
1700 if (hotplug_status)
1701 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1702
1703 return hotplug_status;
1704 }
1705
1706 static void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1707 u32 hotplug_status)
1708 {
1709 u32 pin_mask = 0, long_mask = 0;
1710
1711 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
1712 IS_CHERRYVIEW(dev_priv)) {
1713 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1714
1715 if (hotplug_trigger) {
1716 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1717 hotplug_trigger, hpd_status_g4x,
1718 i9xx_port_hotplug_long_detect);
1719
1720 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1721 }
1722
1723 if (hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1724 dp_aux_irq_handler(dev_priv);
1725 } else {
1726 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1727
1728 if (hotplug_trigger) {
1729 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1730 hotplug_trigger, hpd_status_i915,
1731 i9xx_port_hotplug_long_detect);
1732 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1733 }
1734 }
1735 }
1736
1737 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1738 {
1739 struct drm_device *dev = arg;
1740 struct drm_i915_private *dev_priv = to_i915(dev);
1741 irqreturn_t ret = IRQ_NONE;
1742
1743 if (!intel_irqs_enabled(dev_priv))
1744 return IRQ_NONE;
1745
1746 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
1747 disable_rpm_wakeref_asserts(dev_priv);
1748
1749 do {
1750 u32 iir, gt_iir, pm_iir;
1751 u32 pipe_stats[I915_MAX_PIPES] = {};
1752 u32 hotplug_status = 0;
1753 u32 ier = 0;
1754
1755 gt_iir = I915_READ(GTIIR);
1756 pm_iir = I915_READ(GEN6_PMIIR);
1757 iir = I915_READ(VLV_IIR);
1758
1759 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1760 break;
1761
1762 ret = IRQ_HANDLED;
1763
1764 /*
1765 * Theory on interrupt generation, based on empirical evidence:
1766 *
1767 * x = ((VLV_IIR & VLV_IER) ||
1768 * (((GT_IIR & GT_IER) || (GEN6_PMIIR & GEN6_PMIER)) &&
1769 * (VLV_MASTER_IER & MASTER_INTERRUPT_ENABLE)));
1770 *
1771 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
1772 * Hence we clear MASTER_INTERRUPT_ENABLE and VLV_IER to
1773 * guarantee the CPU interrupt will be raised again even if we
1774 * don't end up clearing all the VLV_IIR, GT_IIR, GEN6_PMIIR
1775 * bits this time around.
1776 */
1777 I915_WRITE(VLV_MASTER_IER, 0);
1778 ier = I915_READ(VLV_IER);
1779 I915_WRITE(VLV_IER, 0);
1780
1781 if (gt_iir)
1782 I915_WRITE(GTIIR, gt_iir);
1783 if (pm_iir)
1784 I915_WRITE(GEN6_PMIIR, pm_iir);
1785
1786 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1787 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1788
1789 /* Call regardless, as some status bits might not be
1790 * signalled in iir */
1791 valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1792
1793 /*
1794 * VLV_IIR is single buffered, and reflects the level
1795 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
1796 */
1797 if (iir)
1798 I915_WRITE(VLV_IIR, iir);
1799
1800 I915_WRITE(VLV_IER, ier);
1801 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
1802 POSTING_READ(VLV_MASTER_IER);
1803
1804 if (gt_iir)
1805 snb_gt_irq_handler(dev_priv, gt_iir);
1806 if (pm_iir)
1807 gen6_rps_irq_handler(dev_priv, pm_iir);
1808
1809 if (hotplug_status)
1810 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1811
1812 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1813 } while (0);
1814
1815 enable_rpm_wakeref_asserts(dev_priv);
1816
1817 return ret;
1818 }
1819
1820 static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1821 {
1822 struct drm_device *dev = arg;
1823 struct drm_i915_private *dev_priv = to_i915(dev);
1824 irqreturn_t ret = IRQ_NONE;
1825
1826 if (!intel_irqs_enabled(dev_priv))
1827 return IRQ_NONE;
1828
1829 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
1830 disable_rpm_wakeref_asserts(dev_priv);
1831
1832 do {
1833 u32 master_ctl, iir;
1834 u32 gt_iir[4] = {};
1835 u32 pipe_stats[I915_MAX_PIPES] = {};
1836 u32 hotplug_status = 0;
1837 u32 ier = 0;
1838
1839 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1840 iir = I915_READ(VLV_IIR);
1841
1842 if (master_ctl == 0 && iir == 0)
1843 break;
1844
1845 ret = IRQ_HANDLED;
1846
1847 /*
1848 * Theory on interrupt generation, based on empirical evidence:
1849 *
1850 * x = ((VLV_IIR & VLV_IER) ||
1851 * ((GEN8_MASTER_IRQ & ~GEN8_MASTER_IRQ_CONTROL) &&
1852 * (GEN8_MASTER_IRQ & GEN8_MASTER_IRQ_CONTROL)));
1853 *
1854 * A CPU interrupt will only be raised when 'x' has a 0->1 edge.
1855 * Hence we clear GEN8_MASTER_IRQ_CONTROL and VLV_IER to
1856 * guarantee the CPU interrupt will be raised again even if we
1857 * don't end up clearing all the VLV_IIR and GEN8_MASTER_IRQ_CONTROL
1858 * bits this time around.
1859 */
1860 I915_WRITE(GEN8_MASTER_IRQ, 0);
1861 ier = I915_READ(VLV_IER);
1862 I915_WRITE(VLV_IER, 0);
1863
1864 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
1865
1866 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1867 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
1868
1869 /* Call regardless, as some status bits might not be
1870 * signalled in iir */
1871 valleyview_pipestat_irq_ack(dev_priv, iir, pipe_stats);
1872
1873 /*
1874 * VLV_IIR is single buffered, and reflects the level
1875 * from PIPESTAT/PORT_HOTPLUG_STAT, hence clear it last.
1876 */
1877 if (iir)
1878 I915_WRITE(VLV_IIR, iir);
1879
1880 I915_WRITE(VLV_IER, ier);
1881 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
1882 POSTING_READ(GEN8_MASTER_IRQ);
1883
1884 gen8_gt_irq_handler(dev_priv, gt_iir);
1885
1886 if (hotplug_status)
1887 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
1888
1889 valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
1890 } while (0);
1891
1892 enable_rpm_wakeref_asserts(dev_priv);
1893
1894 return ret;
1895 }
1896
1897 static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv,
1898 u32 hotplug_trigger,
1899 const u32 hpd[HPD_NUM_PINS])
1900 {
1901 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
1902
1903 /*
1904 * Somehow the PCH doesn't seem to really ack the interrupt to the CPU
1905 * unless we touch the hotplug register, even if hotplug_trigger is
1906 * zero. Not acking leads to "The master control interrupt lied (SDE)!"
1907 * errors.
1908 */
1909 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1910 if (!hotplug_trigger) {
1911 u32 mask = PORTA_HOTPLUG_STATUS_MASK |
1912 PORTD_HOTPLUG_STATUS_MASK |
1913 PORTC_HOTPLUG_STATUS_MASK |
1914 PORTB_HOTPLUG_STATUS_MASK;
1915 dig_hotplug_reg &= ~mask;
1916 }
1917
1918 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1919 if (!hotplug_trigger)
1920 return;
1921
1922 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
1923 dig_hotplug_reg, hpd,
1924 pch_port_hotplug_long_detect);
1925
1926 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
1927 }
1928
1929 static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
1930 {
1931 int pipe;
1932 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1933
1934 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx);
1935
1936 if (pch_iir & SDE_AUDIO_POWER_MASK) {
1937 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1938 SDE_AUDIO_POWER_SHIFT);
1939 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
1940 port_name(port));
1941 }
1942
1943 if (pch_iir & SDE_AUX_MASK)
1944 dp_aux_irq_handler(dev_priv);
1945
1946 if (pch_iir & SDE_GMBUS)
1947 gmbus_irq_handler(dev_priv);
1948
1949 if (pch_iir & SDE_AUDIO_HDCP_MASK)
1950 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
1951
1952 if (pch_iir & SDE_AUDIO_TRANS_MASK)
1953 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
1954
1955 if (pch_iir & SDE_POISON)
1956 DRM_ERROR("PCH poison interrupt\n");
1957
1958 if (pch_iir & SDE_FDI_MASK)
1959 for_each_pipe(dev_priv, pipe)
1960 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
1961 pipe_name(pipe),
1962 I915_READ(FDI_RX_IIR(pipe)));
1963
1964 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
1965 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
1966
1967 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
1968 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
1969
1970 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
1971 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
1972
1973 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
1974 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
1975 }
1976
1977 static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
1978 {
1979 u32 err_int = I915_READ(GEN7_ERR_INT);
1980 enum pipe pipe;
1981
1982 if (err_int & ERR_INT_POISON)
1983 DRM_ERROR("Poison interrupt\n");
1984
1985 for_each_pipe(dev_priv, pipe) {
1986 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
1987 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1988
1989 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
1990 if (IS_IVYBRIDGE(dev_priv))
1991 ivb_pipe_crc_irq_handler(dev_priv, pipe);
1992 else
1993 hsw_pipe_crc_irq_handler(dev_priv, pipe);
1994 }
1995 }
1996
1997 I915_WRITE(GEN7_ERR_INT, err_int);
1998 }
1999
2000 static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
2001 {
2002 u32 serr_int = I915_READ(SERR_INT);
2003
2004 if (serr_int & SERR_INT_POISON)
2005 DRM_ERROR("PCH poison interrupt\n");
2006
2007 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
2008 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
2009
2010 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
2011 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
2012
2013 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
2014 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);
2015
2016 I915_WRITE(SERR_INT, serr_int);
2017 }
2018
2019 static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2020 {
2021 int pipe;
2022 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
2023
2024 ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt);
2025
2026 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
2027 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
2028 SDE_AUDIO_POWER_SHIFT_CPT);
2029 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2030 port_name(port));
2031 }
2032
2033 if (pch_iir & SDE_AUX_MASK_CPT)
2034 dp_aux_irq_handler(dev_priv);
2035
2036 if (pch_iir & SDE_GMBUS_CPT)
2037 gmbus_irq_handler(dev_priv);
2038
2039 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
2040 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
2041
2042 if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
2043 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
2044
2045 if (pch_iir & SDE_FDI_MASK_CPT)
2046 for_each_pipe(dev_priv, pipe)
2047 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
2048 pipe_name(pipe),
2049 I915_READ(FDI_RX_IIR(pipe)));
2050
2051 if (pch_iir & SDE_ERROR_CPT)
2052 cpt_serr_int_handler(dev_priv);
2053 }
2054
2055 static void spt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
2056 {
2057 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_SPT &
2058 ~SDE_PORTE_HOTPLUG_SPT;
2059 u32 hotplug2_trigger = pch_iir & SDE_PORTE_HOTPLUG_SPT;
2060 u32 pin_mask = 0, long_mask = 0;
2061
2062 if (hotplug_trigger) {
2063 u32 dig_hotplug_reg;
2064
2065 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2066 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2067
2068 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2069 dig_hotplug_reg, hpd_spt,
2070 spt_port_hotplug_long_detect);
2071 }
2072
2073 if (hotplug2_trigger) {
2074 u32 dig_hotplug_reg;
2075
2076 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG2);
2077 I915_WRITE(PCH_PORT_HOTPLUG2, dig_hotplug_reg);
2078
2079 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug2_trigger,
2080 dig_hotplug_reg, hpd_spt,
2081 spt_port_hotplug2_long_detect);
2082 }
2083
2084 if (pin_mask)
2085 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2086
2087 if (pch_iir & SDE_GMBUS_CPT)
2088 gmbus_irq_handler(dev_priv);
2089 }
2090
2091 static void ilk_hpd_irq_handler(struct drm_i915_private *dev_priv,
2092 u32 hotplug_trigger,
2093 const u32 hpd[HPD_NUM_PINS])
2094 {
2095 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2096
2097 dig_hotplug_reg = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
2098 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, dig_hotplug_reg);
2099
2100 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2101 dig_hotplug_reg, hpd,
2102 ilk_port_hotplug_long_detect);
2103
2104 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2105 }
2106
2107 static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
2108 u32 de_iir)
2109 {
2110 enum pipe pipe;
2111 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG;
2112
2113 if (hotplug_trigger)
2114 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ilk);
2115
2116 if (de_iir & DE_AUX_CHANNEL_A)
2117 dp_aux_irq_handler(dev_priv);
2118
2119 if (de_iir & DE_GSE)
2120 intel_opregion_asle_intr(dev_priv);
2121
2122 if (de_iir & DE_POISON)
2123 DRM_ERROR("Poison interrupt\n");
2124
2125 for_each_pipe(dev_priv, pipe) {
2126 if (de_iir & DE_PIPE_VBLANK(pipe) &&
2127 intel_pipe_handle_vblank(dev_priv, pipe))
2128 intel_check_page_flip(dev_priv, pipe);
2129
2130 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2131 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2132
2133 if (de_iir & DE_PIPE_CRC_DONE(pipe))
2134 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
2135
2136 /* plane/pipes map 1:1 on ilk+ */
2137 if (de_iir & DE_PLANE_FLIP_DONE(pipe))
2138 intel_finish_page_flip_cs(dev_priv, pipe);
2139 }
2140
2141 /* check event from PCH */
2142 if (de_iir & DE_PCH_EVENT) {
2143 u32 pch_iir = I915_READ(SDEIIR);
2144
2145 if (HAS_PCH_CPT(dev_priv))
2146 cpt_irq_handler(dev_priv, pch_iir);
2147 else
2148 ibx_irq_handler(dev_priv, pch_iir);
2149
2150 /* the PCH hotplug event should be cleared before the CPU irq */
2151 I915_WRITE(SDEIIR, pch_iir);
2152 }
2153
2154 if (IS_GEN5(dev_priv) && de_iir & DE_PCU_EVENT)
2155 ironlake_rps_change_irq_handler(dev_priv);
2156 }
2157
2158 static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
2159 u32 de_iir)
2160 {
2161 enum pipe pipe;
2162 u32 hotplug_trigger = de_iir & DE_DP_A_HOTPLUG_IVB;
2163
2164 if (hotplug_trigger)
2165 ilk_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ivb);
2166
2167 if (de_iir & DE_ERR_INT_IVB)
2168 ivb_err_int_handler(dev_priv);
2169
2170 if (de_iir & DE_AUX_CHANNEL_A_IVB)
2171 dp_aux_irq_handler(dev_priv);
2172
2173 if (de_iir & DE_GSE_IVB)
2174 intel_opregion_asle_intr(dev_priv);
2175
2176 for_each_pipe(dev_priv, pipe) {
2177 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
2178 intel_pipe_handle_vblank(dev_priv, pipe))
2179 intel_check_page_flip(dev_priv, pipe);
2180
2181 /* plane/pipes map 1:1 on ilk+ */
2182 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe))
2183 intel_finish_page_flip_cs(dev_priv, pipe);
2184 }
2185
2186 /* check event from PCH */
2187 if (!HAS_PCH_NOP(dev_priv) && (de_iir & DE_PCH_EVENT_IVB)) {
2188 u32 pch_iir = I915_READ(SDEIIR);
2189
2190 cpt_irq_handler(dev_priv, pch_iir);
2191
2192 /* clear the PCH hotplug event before clearing the CPU irq */
2193 I915_WRITE(SDEIIR, pch_iir);
2194 }
2195 }
2196
2197 /*
2198 * To handle irqs with the minimum potential races with fresh interrupts, we:
2199 * 1 - Disable Master Interrupt Control.
2200 * 2 - Find the source(s) of the interrupt.
2201 * 3 - Clear the Interrupt Identity bits (IIR).
2202 * 4 - Process the interrupt(s) that had bits set in the IIRs.
2203 * 5 - Re-enable Master Interrupt Control.
2204 */
2205 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2206 {
2207 struct drm_device *dev = arg;
2208 struct drm_i915_private *dev_priv = to_i915(dev);
2209 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2210 irqreturn_t ret = IRQ_NONE;
2211
2212 if (!intel_irqs_enabled(dev_priv))
2213 return IRQ_NONE;
2214
2215 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2216 disable_rpm_wakeref_asserts(dev_priv);
2217
2218 /* disable master interrupt before clearing iir */
2219 de_ier = I915_READ(DEIER);
2220 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2221 POSTING_READ(DEIER);
2222
2223 /* Disable south interrupts. We'll only write to SDEIIR once, so further
2224 * interrupts will be stored on its back queue, and then we'll be
2225 * able to process them after we restore SDEIER (as soon as we restore
2226 * it, we'll get an interrupt if SDEIIR still has something to process
2227 * due to its back queue). */
2228 if (!HAS_PCH_NOP(dev_priv)) {
2229 sde_ier = I915_READ(SDEIER);
2230 I915_WRITE(SDEIER, 0);
2231 POSTING_READ(SDEIER);
2232 }
2233
2234 /* Find, clear, then process each source of interrupt */
2235
2236 gt_iir = I915_READ(GTIIR);
2237 if (gt_iir) {
2238 I915_WRITE(GTIIR, gt_iir);
2239 ret = IRQ_HANDLED;
2240 if (INTEL_GEN(dev_priv) >= 6)
2241 snb_gt_irq_handler(dev_priv, gt_iir);
2242 else
2243 ilk_gt_irq_handler(dev_priv, gt_iir);
2244 }
2245
2246 de_iir = I915_READ(DEIIR);
2247 if (de_iir) {
2248 I915_WRITE(DEIIR, de_iir);
2249 ret = IRQ_HANDLED;
2250 if (INTEL_GEN(dev_priv) >= 7)
2251 ivb_display_irq_handler(dev_priv, de_iir);
2252 else
2253 ilk_display_irq_handler(dev_priv, de_iir);
2254 }
2255
2256 if (INTEL_GEN(dev_priv) >= 6) {
2257 u32 pm_iir = I915_READ(GEN6_PMIIR);
2258 if (pm_iir) {
2259 I915_WRITE(GEN6_PMIIR, pm_iir);
2260 ret = IRQ_HANDLED;
2261 gen6_rps_irq_handler(dev_priv, pm_iir);
2262 }
2263 }
2264
2265 I915_WRITE(DEIER, de_ier);
2266 POSTING_READ(DEIER);
2267 if (!HAS_PCH_NOP(dev_priv)) {
2268 I915_WRITE(SDEIER, sde_ier);
2269 POSTING_READ(SDEIER);
2270 }
2271
2272 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2273 enable_rpm_wakeref_asserts(dev_priv);
2274
2275 return ret;
2276 }
2277
2278 static void bxt_hpd_irq_handler(struct drm_i915_private *dev_priv,
2279 u32 hotplug_trigger,
2280 const u32 hpd[HPD_NUM_PINS])
2281 {
2282 u32 dig_hotplug_reg, pin_mask = 0, long_mask = 0;
2283
2284 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2285 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2286
2287 intel_get_hpd_pins(&pin_mask, &long_mask, hotplug_trigger,
2288 dig_hotplug_reg, hpd,
2289 bxt_port_hotplug_long_detect);
2290
2291 intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
2292 }
2293
2294 static irqreturn_t
2295 gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
2296 {
2297 irqreturn_t ret = IRQ_NONE;
2298 u32 iir;
2299 enum pipe pipe;
2300
2301 if (master_ctl & GEN8_DE_MISC_IRQ) {
2302 iir = I915_READ(GEN8_DE_MISC_IIR);
2303 if (iir) {
2304 I915_WRITE(GEN8_DE_MISC_IIR, iir);
2305 ret = IRQ_HANDLED;
2306 if (iir & GEN8_DE_MISC_GSE)
2307 intel_opregion_asle_intr(dev_priv);
2308 else
2309 DRM_ERROR("Unexpected DE Misc interrupt\n");
2310 } else {
2311 DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2312 }
2313 }
2314
2315 if (master_ctl & GEN8_DE_PORT_IRQ) {
2316 iir = I915_READ(GEN8_DE_PORT_IIR);
2317 if (iir) {
2318 u32 tmp_mask;
2319 bool found = false;
2320
2321 I915_WRITE(GEN8_DE_PORT_IIR, iir);
2322 ret = IRQ_HANDLED;
2323
2324 tmp_mask = GEN8_AUX_CHANNEL_A;
2325 if (INTEL_INFO(dev_priv)->gen >= 9)
2326 tmp_mask |= GEN9_AUX_CHANNEL_B |
2327 GEN9_AUX_CHANNEL_C |
2328 GEN9_AUX_CHANNEL_D;
2329
2330 if (iir & tmp_mask) {
2331 dp_aux_irq_handler(dev_priv);
2332 found = true;
2333 }
2334
2335 if (IS_BROXTON(dev_priv)) {
2336 tmp_mask = iir & BXT_DE_PORT_HOTPLUG_MASK;
2337 if (tmp_mask) {
2338 bxt_hpd_irq_handler(dev_priv, tmp_mask,
2339 hpd_bxt);
2340 found = true;
2341 }
2342 } else if (IS_BROADWELL(dev_priv)) {
2343 tmp_mask = iir & GEN8_PORT_DP_A_HOTPLUG;
2344 if (tmp_mask) {
2345 ilk_hpd_irq_handler(dev_priv,
2346 tmp_mask, hpd_bdw);
2347 found = true;
2348 }
2349 }
2350
2351 if (IS_BROXTON(dev_priv) && (iir & BXT_DE_PORT_GMBUS)) {
2352 gmbus_irq_handler(dev_priv);
2353 found = true;
2354 }
2355
2356 if (!found)
2357 DRM_ERROR("Unexpected DE Port interrupt\n");
2358 } else {
2359 DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2360 }
2361 }
2362
2363 for_each_pipe(dev_priv, pipe) {
2364 u32 flip_done, fault_errors;
2365
2366 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2367 continue;
2368
2369 iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2370 if (!iir) {
2371 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2372 continue;
2373 }
2374
2375 ret = IRQ_HANDLED;
2376 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
2377
2378 if (iir & GEN8_PIPE_VBLANK &&
2379 intel_pipe_handle_vblank(dev_priv, pipe))
2380 intel_check_page_flip(dev_priv, pipe);
2381
2382 flip_done = iir;
2383 if (INTEL_INFO(dev_priv)->gen >= 9)
2384 flip_done &= GEN9_PIPE_PLANE1_FLIP_DONE;
2385 else
2386 flip_done &= GEN8_PIPE_PRIMARY_FLIP_DONE;
2387
2388 if (flip_done)
2389 intel_finish_page_flip_cs(dev_priv, pipe);
2390
2391 if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
2392 hsw_pipe_crc_irq_handler(dev_priv, pipe);
2393
2394 if (iir & GEN8_PIPE_FIFO_UNDERRUN)
2395 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2396
2397 fault_errors = iir;
2398 if (INTEL_INFO(dev_priv)->gen >= 9)
2399 fault_errors &= GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2400 else
2401 fault_errors &= GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2402
2403 if (fault_errors)
2404 DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
2405 pipe_name(pipe),
2406 fault_errors);
2407 }
2408
2409 if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
2410 master_ctl & GEN8_DE_PCH_IRQ) {
2411 /*
2412 * FIXME(BDW): Assume for now that the new interrupt handling
2413 * scheme also closed the SDE interrupt handling race we've seen
2414 * on older pch-split platforms. But this needs testing.
2415 */
2416 iir = I915_READ(SDEIIR);
2417 if (iir) {
2418 I915_WRITE(SDEIIR, iir);
2419 ret = IRQ_HANDLED;
2420
2421 if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
2422 spt_irq_handler(dev_priv, iir);
2423 else
2424 cpt_irq_handler(dev_priv, iir);
2425 } else {
2426 /*
2427 * Like on previous PCH there seems to be something
2428 * fishy going on with forwarding PCH interrupts.
2429 */
2430 DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
2431 }
2432 }
2433
2434 return ret;
2435 }
2436
2437 static irqreturn_t gen8_irq_handler(int irq, void *arg)
2438 {
2439 struct drm_device *dev = arg;
2440 struct drm_i915_private *dev_priv = to_i915(dev);
2441 u32 master_ctl;
2442 u32 gt_iir[4] = {};
2443 irqreturn_t ret;
2444
2445 if (!intel_irqs_enabled(dev_priv))
2446 return IRQ_NONE;
2447
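/*
 * The _FW accessors are the raw mmio variants that skip the forcewake
 * bookkeeping; presumably safe for the master IRQ register, and it
 * keeps the hot interrupt path cheap.
 */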
2448 master_ctl = I915_READ_FW(GEN8_MASTER_IRQ);
2449 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2450 if (!master_ctl)
2451 return IRQ_NONE;
2452
2453 I915_WRITE_FW(GEN8_MASTER_IRQ, 0);
2454
2455 /* IRQs are synced during runtime_suspend, we don't require a wakeref */
2456 disable_rpm_wakeref_asserts(dev_priv);
2457
2458 /* Find, clear, then process each source of interrupt */
2459 ret = gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir);
2460 gen8_gt_irq_handler(dev_priv, gt_iir);
2461 ret |= gen8_de_irq_handler(dev_priv, master_ctl);
2462
2463 I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2464 POSTING_READ_FW(GEN8_MASTER_IRQ);
2465
2466 enable_rpm_wakeref_asserts(dev_priv);
2467
2468 return ret;
2469 }
2470
2471 static void i915_error_wake_up(struct drm_i915_private *dev_priv)
2472 {
2473 /*
2474 * Notify all waiters for GPU completion events that reset state has
2475 * been changed, and that they need to restart their wait after
2476 * checking for potential errors (and bail out to drop locks if there is
2477 * a gpu reset pending so that i915_error_work_func can acquire them).
2478 */
2479
2480 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2481 wake_up_all(&dev_priv->gpu_error.wait_queue);
2482
2483 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2484 wake_up_all(&dev_priv->pending_flip_queue);
2485 }
2486
2487 /**
2488 * i915_reset_and_wakeup - do process context error handling work
2489 * @dev_priv: i915 device private
2490 *
2491 * Fire an error uevent so userspace can see that a hang or error
2492 * was detected.
2493 */
2494 static void i915_reset_and_wakeup(struct drm_i915_private *dev_priv)
2495 {
2496 struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
2497 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2498 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2499 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
2500 int ret;
2501
2502 kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
2503
2504 /*
2505 * Note that there's only one work item which does gpu resets, so we
2506 * need not worry about concurrent gpu resets potentially incrementing
2507 * error->reset_counter twice. We only need to take care of another
2508 * racing irq/hangcheck declaring the gpu dead for a second time. A
2509 * quick check for that is good enough: schedule_work ensures the
2510 * correct ordering between hang detection and this work item, and since
2511 * the reset in-progress bit is only ever set by code outside of this
2512 * work we don't need to worry about any other races.
2513 */
2514 if (i915_reset_in_progress(&dev_priv->gpu_error)) {
2515 DRM_DEBUG_DRIVER("resetting chip\n");
2516 kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
2517
2518 /*
2519 * In most cases it's guaranteed that we get here with an RPM
2520 * reference held, for example because there is a pending GPU
2521 * request that won't finish until the reset is done. This
2522 * isn't the case at least when we get here by doing a
2523 * simulated reset via debugfs, so get an RPM reference.
2524 */
2525 intel_runtime_pm_get(dev_priv);
2526
2527 intel_prepare_reset(dev_priv);
2528
2529 /*
2530 * All state reset _must_ be completed before we update the
2531 * reset counter, for otherwise waiters might miss the reset
2532 * pending state and not properly drop locks, resulting in
2533 * deadlocks with the reset work.
2534 */
2535 ret = i915_reset(dev_priv);
2536
2537 intel_finish_reset(dev_priv);
2538
2539 intel_runtime_pm_put(dev_priv);
2540
2541 if (ret == 0)
2542 kobject_uevent_env(kobj,
2543 KOBJ_CHANGE, reset_done_event);
2544
2545 /*
2546 * Note: The wake_up also serves as a memory barrier so that
2547 * waiters see the updated value of the reset counter atomic_t.
2548 */
2549 wake_up_all(&dev_priv->gpu_error.reset_queue);
2550 }
2551 }
2552
2553 static void i915_report_and_clear_eir(struct drm_i915_private *dev_priv)
2554 {
2555 uint32_t instdone[I915_NUM_INSTDONE_REG];
2556 u32 eir = I915_READ(EIR);
2557 int pipe, i;
2558
2559 if (!eir)
2560 return;
2561
2562 pr_err("render error detected, EIR: 0x%08x\n", eir);
2563
2564 i915_get_extra_instdone(dev_priv, instdone);
2565
2566 if (IS_G4X(dev_priv)) {
2567 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2568 u32 ipeir = I915_READ(IPEIR_I965);
2569
2570 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2571 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2572 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2573 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2574 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
2575 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2576 I915_WRITE(IPEIR_I965, ipeir);
2577 POSTING_READ(IPEIR_I965);
2578 }
2579 if (eir & GM45_ERROR_PAGE_TABLE) {
2580 u32 pgtbl_err = I915_READ(PGTBL_ER);
2581 pr_err("page table error\n");
2582 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
2583 I915_WRITE(PGTBL_ER, pgtbl_err);
2584 POSTING_READ(PGTBL_ER);
2585 }
2586 }
2587
2588 if (!IS_GEN2(dev_priv)) {
2589 if (eir & I915_ERROR_PAGE_TABLE) {
2590 u32 pgtbl_err = I915_READ(PGTBL_ER);
2591 pr_err("page table error\n");
2592 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
2593 I915_WRITE(PGTBL_ER, pgtbl_err);
2594 POSTING_READ(PGTBL_ER);
2595 }
2596 }
2597
2598 if (eir & I915_ERROR_MEMORY_REFRESH) {
2599 pr_err("memory refresh error:\n");
2600 for_each_pipe(dev_priv, pipe)
2601 pr_err("pipe %c stat: 0x%08x\n",
2602 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
2603 /* pipestat has already been acked */
2604 }
2605 if (eir & I915_ERROR_INSTRUCTION) {
2606 pr_err("instruction error\n");
2607 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
2608 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2609 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2610 if (INTEL_GEN(dev_priv) < 4) {
2611 u32 ipeir = I915_READ(IPEIR);
2612
2613 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
2614 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
2615 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
2616 I915_WRITE(IPEIR, ipeir);
2617 POSTING_READ(IPEIR);
2618 } else {
2619 u32 ipeir = I915_READ(IPEIR_I965);
2620
2621 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2622 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2623 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
2624 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2625 I915_WRITE(IPEIR_I965, ipeir);
2626 POSTING_READ(IPEIR_I965);
2627 }
2628 }
2629
2630 I915_WRITE(EIR, eir);
2631 POSTING_READ(EIR);
2632 eir = I915_READ(EIR);
2633 if (eir) {
2634 /*
2635 * some errors might have become stuck,
2636 * mask them.
2637 */
2638 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2639 I915_WRITE(EMR, I915_READ(EMR) | eir);
2640 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2641 }
2642 }
2643
2644 /**
2645 * i915_handle_error - handle a gpu error
2646 * @dev_priv: i915 device private
2647 * @engine_mask: mask representing engines that are hung
2648 * @fmt: Error message format string
2649 * Do some basic checking of register state at error time and
2650 * dump it to the syslog. Also call i915_capture_error_state() to make
2651 * sure we get a record and make it available in debugfs. Fire a uevent
2652 * so userspace knows something bad happened (should trigger collection
2653 * of a ring dump etc.).
2654 */
2655 void i915_handle_error(struct drm_i915_private *dev_priv,
2656 u32 engine_mask,
2657 const char *fmt, ...)
2658 {
2659 va_list args;
2660 char error_msg[80];
2661
2662 va_start(args, fmt);
2663 vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2664 va_end(args);
2665
2666 i915_capture_error_state(dev_priv, engine_mask, error_msg);
2667 i915_report_and_clear_eir(dev_priv);
2668
2669 if (engine_mask) {
2670 atomic_or(I915_RESET_IN_PROGRESS_FLAG,
2671 &dev_priv->gpu_error.reset_counter);
2672
2673 /*
2674 * Wakeup waiting processes so that the reset function
2675 * i915_reset_and_wakeup doesn't deadlock trying to grab
2676 * various locks. By bumping the reset counter first, the woken
2677 * processes will see a reset in progress and back off,
2678 * releasing their locks and then wait for the reset completion.
2679 * We must do this for _all_ gpu waiters that might hold locks
2680 * that the reset work needs to acquire.
2681 *
2682 * Note: The wake_up serves as the required memory barrier to
2683 * ensure that the waiters see the updated value of the reset
2684 * counter atomic_t.
2685 */
2686 i915_error_wake_up(dev_priv);
2687 }
2688
2689 i915_reset_and_wakeup(dev_priv);
2690 }
2691
2692 /* Called from drm generic code, passed 'crtc' which
2693 * we use as a pipe index
2694 */
2695 static int i915_enable_vblank(struct drm_device *dev, unsigned int pipe)
2696 {
2697 struct drm_i915_private *dev_priv = to_i915(dev);
2698 unsigned long irqflags;
2699
2700 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2701 if (INTEL_INFO(dev)->gen >= 4)
2702 i915_enable_pipestat(dev_priv, pipe,
2703 PIPE_START_VBLANK_INTERRUPT_STATUS);
2704 else
2705 i915_enable_pipestat(dev_priv, pipe,
2706 PIPE_VBLANK_INTERRUPT_STATUS);
2707 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2708
2709 return 0;
2710 }
2711
2712 static int ironlake_enable_vblank(struct drm_device *dev, unsigned int pipe)
2713 {
2714 struct drm_i915_private *dev_priv = to_i915(dev);
2715 unsigned long irqflags;
2716 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2717 DE_PIPE_VBLANK(pipe);
2718
2719 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2720 ilk_enable_display_irq(dev_priv, bit);
2721 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2722
2723 return 0;
2724 }
2725
2726 static int valleyview_enable_vblank(struct drm_device *dev, unsigned int pipe)
2727 {
2728 struct drm_i915_private *dev_priv = to_i915(dev);
2729 unsigned long irqflags;
2730
2731 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2732 i915_enable_pipestat(dev_priv, pipe,
2733 PIPE_START_VBLANK_INTERRUPT_STATUS);
2734 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2735
2736 return 0;
2737 }
2738
2739 static int gen8_enable_vblank(struct drm_device *dev, unsigned int pipe)
2740 {
2741 struct drm_i915_private *dev_priv = to_i915(dev);
2742 unsigned long irqflags;
2743
2744 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2745 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2746 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2747
2748 return 0;
2749 }
2750
2751 /* Called from drm generic code, passed 'crtc' which
2752 * we use as a pipe index
2753 */
2754 static void i915_disable_vblank(struct drm_device *dev, unsigned int pipe)
2755 {
2756 struct drm_i915_private *dev_priv = to_i915(dev);
2757 unsigned long irqflags;
2758
2759 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2760 i915_disable_pipestat(dev_priv, pipe,
2761 PIPE_VBLANK_INTERRUPT_STATUS |
2762 PIPE_START_VBLANK_INTERRUPT_STATUS);
2763 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2764 }
2765
2766 static void ironlake_disable_vblank(struct drm_device *dev, unsigned int pipe)
2767 {
2768 struct drm_i915_private *dev_priv = to_i915(dev);
2769 unsigned long irqflags;
2770 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2771 DE_PIPE_VBLANK(pipe);
2772
2773 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2774 ilk_disable_display_irq(dev_priv, bit);
2775 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2776 }
2777
2778 static void valleyview_disable_vblank(struct drm_device *dev, unsigned int pipe)
2779 {
2780 struct drm_i915_private *dev_priv = to_i915(dev);
2781 unsigned long irqflags;
2782
2783 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2784 i915_disable_pipestat(dev_priv, pipe,
2785 PIPE_START_VBLANK_INTERRUPT_STATUS);
2786 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2787 }
2788
2789 static void gen8_disable_vblank(struct drm_device *dev, unsigned int pipe)
2790 {
2791 struct drm_i915_private *dev_priv = to_i915(dev);
2792 unsigned long irqflags;
2793
2794 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2795 bdw_disable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK);
2796 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2797 }
2798
2799 static bool
2800 ipehr_is_semaphore_wait(struct intel_engine_cs *engine, u32 ipehr)
2801 {
2802 if (INTEL_GEN(engine->i915) >= 8) {
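/*
 * Bits 28:23 of an MI command encode the opcode; 0x1c is
 * MI_SEMAPHORE_WAIT on gen8+ (per the MI_INSTR() encoding,
 * assumed here to match i915_reg.h).
 */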
2803 return (ipehr >> 23) == 0x1c;
2804 } else {
2805 ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
2806 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
2807 MI_SEMAPHORE_REGISTER);
2808 }
2809 }
2810
2811 static struct intel_engine_cs *
2812 semaphore_wait_to_signaller_ring(struct intel_engine_cs *engine, u32 ipehr,
2813 u64 offset)
2814 {
2815 struct drm_i915_private *dev_priv = engine->i915;
2816 struct intel_engine_cs *signaller;
2817
2818 if (INTEL_GEN(dev_priv) >= 8) {
2819 for_each_engine(signaller, dev_priv) {
2820 if (engine == signaller)
2821 continue;
2822
2823 if (offset == signaller->semaphore.signal_ggtt[engine->id])
2824 return signaller;
2825 }
2826 } else {
2827 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
2828
2829 for_each_engine(signaller, dev_priv) {
2830 if (engine == signaller)
2831 continue;
2832
2833 if (sync_bits == signaller->semaphore.mbox.wait[engine->id])
2834 return signaller;
2835 }
2836 }
2837
2838 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
2839 engine->id, ipehr, offset);
2840
2841 return NULL;
2842 }
2843
2844 static struct intel_engine_cs *
2845 semaphore_waits_for(struct intel_engine_cs *engine, u32 *seqno)
2846 {
2847 struct drm_i915_private *dev_priv = engine->i915;
2848 void __iomem *vaddr;
2849 u32 cmd, ipehr, head;
2850 u64 offset = 0;
2851 int i, backwards;
2852
2853 /*
2854 * This function does not support execlist mode - any attempt to
2855 * proceed further into this function will result in a kernel panic
2856 * when dereferencing ring->buffer, which is not set up in execlist
2857 * mode.
2858 *
2859 * The correct way of doing it would be to derive the currently
2860 * executing ring buffer from the current context, which is derived
2861 * from the currently running request. Unfortunately, to get the
2862 * current request we would have to grab the struct_mutex before doing
2863 * anything else, which would be ill-advised since some other thread
2864 * might have grabbed it already and managed to hang itself, causing
2865 * the hang checker to deadlock.
2866 *
2867 * Therefore, this function does not support execlist mode in its
2868 * current form. Just return NULL and move on.
2869 */
2870 if (engine->buffer == NULL)
2871 return NULL;
2872
2873 ipehr = I915_READ(RING_IPEHR(engine->mmio_base));
2874 if (!ipehr_is_semaphore_wait(engine, ipehr))
2875 return NULL;
2876
2877 /*
2878 * HEAD is likely pointing to the dword after the actual command,
2879 * so scan backwards until we find the MBOX. But limit it to just 4
2880 * or 5 dwords depending on the semaphore wait command size.
2881 * Note that we don't care about ACTHD here since that might
2882 * point at a batch, and semaphores are always emitted into the
2883 * ringbuffer itself.
2884 */
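/*
 * Assumed dword layout at the command once found, matching the
 * ioread32() offsets used below:
 *
 *	head + 0:  the wait command itself (cmd == ipehr)
 *	head + 4:  seqno argument of the wait
 *	head + 8:  semaphore address, low dword  (gen8+ only)
 *	head + 12: semaphore address, high dword (gen8+ only)
 */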
2885 head = I915_READ_HEAD(engine) & HEAD_ADDR;
2886 backwards = (INTEL_GEN(dev_priv) >= 8) ? 5 : 4;
2887 vaddr = (void __iomem *)engine->buffer->vaddr;
2888
2889 for (i = backwards; i; --i) {
2890 /*
2891 * Be paranoid and presume the hw has gone off into the wild -
2892 * our ring is smaller than what the hardware (and hence
2893 * HEAD_ADDR) allows. Also handles wrap-around.
2894 */
2895 head &= engine->buffer->size - 1;
2896
2897 /* This here seems to blow up */
2898 cmd = ioread32(vaddr + head);
2899 if (cmd == ipehr)
2900 break;
2901
2902 head -= 4;
2903 }
2904
2905 if (!i)
2906 return NULL;
2907
2908 *seqno = ioread32(vaddr + head + 4) + 1;
2909 if (INTEL_GEN(dev_priv) >= 8) {
2910 offset = ioread32(vaddr + head + 12);
2911 offset <<= 32;
2912 offset |= ioread32(vaddr + head + 8);
2913 }
2914 return semaphore_wait_to_signaller_ring(engine, ipehr, offset);
2915 }
2916
2917 static int semaphore_passed(struct intel_engine_cs *engine)
2918 {
2919 struct drm_i915_private *dev_priv = engine->i915;
2920 struct intel_engine_cs *signaller;
2921 u32 seqno;
2922
2923 engine->hangcheck.deadlock++;
2924
2925 signaller = semaphore_waits_for(engine, &seqno);
2926 if (signaller == NULL)
2927 return -1;
2928
2929 /* Prevent pathological recursion due to driver bugs */
2930 if (signaller->hangcheck.deadlock >= I915_NUM_ENGINES)
2931 return -1;
2932
2933 if (i915_seqno_passed(intel_engine_get_seqno(signaller), seqno))
2934 return 1;
2935
2936 /* cursory check for an unkickable deadlock */
2937 if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
2938 semaphore_passed(signaller) < 0)
2939 return -1;
2940
2941 return 0;
2942 }
2943
2944 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2945 {
2946 struct intel_engine_cs *engine;
2947
2948 for_each_engine(engine, dev_priv)
2949 engine->hangcheck.deadlock = 0;
2950 }
2951
2952 static bool subunits_stuck(struct intel_engine_cs *engine)
2953 {
2954 u32 instdone[I915_NUM_INSTDONE_REG];
2955 bool stuck;
2956 int i;
2957
2958 if (engine->id != RCS)
2959 return true;
2960
2961 i915_get_extra_instdone(engine->i915, instdone);
2962
2963 /* There might be unstable subunit states even when
2964 * actual head is not moving. Filter out the unstable ones by
2965 * accumulating the undone -> done transitions and only
2966 * consider those as progress.
2967 */
2968 stuck = true;
2969 for (i = 0; i < I915_NUM_INSTDONE_REG; i++) {
2970 const u32 tmp = instdone[i] | engine->hangcheck.instdone[i];
2971
2972 if (tmp != engine->hangcheck.instdone[i])
2973 stuck = false;
2974
2975 engine->hangcheck.instdone[i] |= tmp;
2976 }
2977
2978 return stuck;
2979 }
2980
2981 static enum intel_engine_hangcheck_action
2982 head_stuck(struct intel_engine_cs *engine, u64 acthd)
2983 {
2984 if (acthd != engine->hangcheck.acthd) {
2985
2986 /* Clear subunit states on head movement */
2987 memset(engine->hangcheck.instdone, 0,
2988 sizeof(engine->hangcheck.instdone));
2989
2990 return HANGCHECK_ACTIVE;
2991 }
2992
2993 if (!subunits_stuck(engine))
2994 return HANGCHECK_ACTIVE;
2995
2996 return HANGCHECK_HUNG;
2997 }
2998
2999 static enum intel_engine_hangcheck_action
3000 engine_stuck(struct intel_engine_cs *engine, u64 acthd)
3001 {
3002 struct drm_i915_private *dev_priv = engine->i915;
3003 enum intel_engine_hangcheck_action ha;
3004 u32 tmp;
3005
3006 ha = head_stuck(engine, acthd);
3007 if (ha != HANGCHECK_HUNG)
3008 return ha;
3009
3010 if (IS_GEN2(dev_priv))
3011 return HANGCHECK_HUNG;
3012
3013 /* Is the chip hanging on a WAIT_FOR_EVENT?
3014 * If so we can simply poke the RB_WAIT bit
3015 * and break the hang. This should work on
3016 * all but the second generation chipsets.
3017 */
3018 tmp = I915_READ_CTL(engine);
3019 if (tmp & RING_WAIT) {
3020 i915_handle_error(dev_priv, 0,
3021 "Kicking stuck wait on %s",
3022 engine->name);
3023 I915_WRITE_CTL(engine, tmp);
3024 return HANGCHECK_KICK;
3025 }
3026
3027 if (INTEL_GEN(dev_priv) >= 6 && tmp & RING_WAIT_SEMAPHORE) {
3028 switch (semaphore_passed(engine)) {
3029 default:
3030 return HANGCHECK_HUNG;
3031 case 1:
3032 i915_handle_error(dev_priv, 0,
3033 "Kicking stuck semaphore on %s",
3034 engine->name);
3035 I915_WRITE_CTL(engine, tmp);
3036 return HANGCHECK_KICK;
3037 case 0:
3038 return HANGCHECK_WAIT;
3039 }
3040 }
3041
3042 return HANGCHECK_HUNG;
3043 }
3044
3045 /*
3046 * This is called when the chip hasn't reported back with completed
3047 * batchbuffers in a long time. We keep track of per-ring seqno progress,
3048 * and if there is no progress the hangcheck score for that ring is
3049 * increased. Further, acthd is inspected to see if the ring is stuck;
3050 * if so, we kick the ring. If we see no progress on three subsequent
3051 * calls we assume the chip is wedged and try to fix it by resetting it.
3052 */
3053 static void i915_hangcheck_elapsed(struct work_struct *work)
3054 {
3055 struct drm_i915_private *dev_priv =
3056 container_of(work, typeof(*dev_priv),
3057 gpu_error.hangcheck_work.work);
3058 struct intel_engine_cs *engine;
3059 unsigned int hung = 0, stuck = 0;
3060 int busy_count = 0;
3061 #define BUSY 1
3062 #define KICK 5
3063 #define HUNG 20
3064 #define ACTIVE_DECAY 15
3065
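/*
 * Worked example of the scoring (assuming the usual
 * HANGCHECK_SCORE_RING_HUNG threshold of 31 and a hangcheck period of
 * roughly 1.5s): an engine sitting HUNG on the same seqno gains 20 per
 * tick and trips the threshold on its second tick, i.e. after about
 * 3 seconds. A merely ACTIVE engine gains 1 per tick and needs ~31
 * ticks, while any seqno progress decays the score by 15 per tick.
 */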
3066 if (!i915.enable_hangcheck)
3067 return;
3068
3069 if (!READ_ONCE(dev_priv->gt.awake))
3070 return;
3071
3072 /* As enabling the GPU requires fairly extensive mmio access,
3073 * periodically arm the mmio checker to see if we are triggering
3074 * any invalid access.
3075 */
3076 intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
3077
3078 for_each_engine(engine, dev_priv) {
3079 bool busy = intel_engine_has_waiter(engine);
3080 u64 acthd;
3081 u32 seqno;
3082
3083 semaphore_clear_deadlocks(dev_priv);
3084
3085 /* We don't strictly need an irq-barrier here, as we are not
3086 * serving an interrupt request, but be paranoid in case the
3087 * barrier has side-effects (such as preventing a broken
3088 * cacheline snoop) and so be sure that we can see the seqno
3089 * advance. If the seqno should stick, due to a stale
3090 * cacheline, we would erroneously declare the GPU hung.
3091 */
3092 if (engine->irq_seqno_barrier)
3093 engine->irq_seqno_barrier(engine);
3094
3095 acthd = intel_engine_get_active_head(engine);
3096 seqno = intel_engine_get_seqno(engine);
3097
3098 if (engine->hangcheck.seqno == seqno) {
3099 if (!intel_engine_is_active(engine)) {
3100 engine->hangcheck.action = HANGCHECK_IDLE;
3101 if (busy) {
3102 /* Safeguard against driver failure */
3103 engine->hangcheck.score += BUSY;
3104 }
3105 } else {
3106 /* We always increment the hangcheck score
3107 * if the engine is busy and still processing
3108 * the same request, so that no single request
3109 * can run indefinitely (such as a chain of
3110 * batches). The only time we do not increment
3111 * the hangcheck score on this ring is if this
3112 * engine is in a legitimate wait for another
3113 * engine. In that case the waiting engine is a
3114 * victim and we want to be sure we catch the
3115 * right culprit. Then every time we do kick
3116 * the ring, we add a small increment to the
3117 * score so that we can catch a batch that is
3118 * being repeatedly kicked and so responsible
3119 * for stalling the machine.
3120 */
3121 engine->hangcheck.action =
3122 engine_stuck(engine, acthd);
3123
3124 switch (engine->hangcheck.action) {
3125 case HANGCHECK_IDLE:
3126 case HANGCHECK_WAIT:
3127 break;
3128 case HANGCHECK_ACTIVE:
3129 engine->hangcheck.score += BUSY;
3130 break;
3131 case HANGCHECK_KICK:
3132 engine->hangcheck.score += KICK;
3133 break;
3134 case HANGCHECK_HUNG:
3135 engine->hangcheck.score += HUNG;
3136 break;
3137 }
3138 }
3139
3140 if (engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
3141 hung |= intel_engine_flag(engine);
3142 if (engine->hangcheck.action != HANGCHECK_HUNG)
3143 stuck |= intel_engine_flag(engine);
3144 }
3145 } else {
3146 engine->hangcheck.action = HANGCHECK_ACTIVE;
3147
3148 /* Gradually reduce the count so that we catch DoS
3149 * attempts across multiple batches.
3150 */
3151 if (engine->hangcheck.score > 0)
3152 engine->hangcheck.score -= ACTIVE_DECAY;
3153 if (engine->hangcheck.score < 0)
3154 engine->hangcheck.score = 0;
3155
3156 /* Clear head and subunit states on seqno movement */
3157 acthd = 0;
3158
3159 memset(engine->hangcheck.instdone, 0,
3160 sizeof(engine->hangcheck.instdone));
3161 }
3162
3163 engine->hangcheck.seqno = seqno;
3164 engine->hangcheck.acthd = acthd;
3165 busy_count += busy;
3166 }
3167
3168 if (hung) {
3169 char msg[80];
3170 int len;
3171
3172 /* If some rings hung but others were still busy, only
3173 * blame the hanging rings in the synopsis.
3174 */
3175 if (stuck != hung)
3176 hung &= ~stuck;
3177 len = scnprintf(msg, sizeof(msg),
3178 "%s on ", stuck == hung ? "No progress" : "Hang");
3179 for_each_engine_masked(engine, dev_priv, hung)
3180 len += scnprintf(msg + len, sizeof(msg) - len,
3181 "%s, ", engine->name);
3182 msg[len-2] = '\0';
3183
3184 return i915_handle_error(dev_priv, hung, msg);
3185 }
3186
3187 /* Reset timer in case GPU hangs without another request being added */
3188 if (busy_count)
3189 i915_queue_hangcheck(dev_priv);
3190 }
3191
3192 static void ibx_irq_reset(struct drm_device *dev)
3193 {
3194 struct drm_i915_private *dev_priv = to_i915(dev);
3195
3196 if (HAS_PCH_NOP(dev))
3197 return;
3198
3199 GEN5_IRQ_RESET(SDE);
3200
3201 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
3202 I915_WRITE(SERR_INT, 0xffffffff);
3203 }
3204
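/*
 * For reference, GEN5_IRQ_RESET(type) (a macro defined earlier in this
 * file) expands to roughly:
 *
 *	I915_WRITE(type##IMR, 0xffffffff);
 *	POSTING_READ(type##IMR);
 *	I915_WRITE(type##IER, 0);
 *	I915_WRITE(type##IIR, 0xffffffff);
 *	POSTING_READ(type##IIR);
 *	I915_WRITE(type##IIR, 0xffffffff);
 *	POSTING_READ(type##IIR);
 *
 * i.e. mask everything, disable delivery, then clear the identity
 * register twice as its bits are double buffered.
 */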
3205 /*
3206 * SDEIER is also touched by the interrupt handler to work around missed PCH
3207 * interrupts. Hence we can't update it after the interrupt handler is enabled -
3208 * instead we unconditionally enable all PCH interrupt sources here, but then
3209 * only unmask them as needed with SDEIMR.
3210 *
3211 * This function needs to be called before interrupts are enabled.
3212 */
3213 static void ibx_irq_pre_postinstall(struct drm_device *dev)
3214 {
3215 struct drm_i915_private *dev_priv = to_i915(dev);
3216
3217 if (HAS_PCH_NOP(dev))
3218 return;
3219
3220 WARN_ON(I915_READ(SDEIER) != 0);
3221 I915_WRITE(SDEIER, 0xffffffff);
3222 POSTING_READ(SDEIER);
3223 }
3224
3225 static void gen5_gt_irq_reset(struct drm_device *dev)
3226 {
3227 struct drm_i915_private *dev_priv = to_i915(dev);
3228
3229 GEN5_IRQ_RESET(GT);
3230 if (INTEL_INFO(dev)->gen >= 6)
3231 GEN5_IRQ_RESET(GEN6_PM);
3232 }
3233
3234 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
3235 {
3236 enum pipe pipe;
3237
3238 if (IS_CHERRYVIEW(dev_priv))
3239 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3240 else
3241 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3242
3243 i915_hotplug_interrupt_update_locked(dev_priv, 0xffffffff, 0);
3244 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3245
3246 for_each_pipe(dev_priv, pipe) {
3247 I915_WRITE(PIPESTAT(pipe),
3248 PIPE_FIFO_UNDERRUN_STATUS |
3249 PIPESTAT_INT_STATUS_MASK);
3250 dev_priv->pipestat_irq_mask[pipe] = 0;
3251 }
3252
3253 GEN5_IRQ_RESET(VLV_);
3254 dev_priv->irq_mask = ~0;
3255 }
3256
3257 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3258 {
3259 u32 pipestat_mask;
3260 u32 enable_mask;
3261 enum pipe pipe;
3262
3263 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3264 PIPE_CRC_DONE_INTERRUPT_STATUS;
3265
3266 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3267 for_each_pipe(dev_priv, pipe)
3268 i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
3269
3270 enable_mask = I915_DISPLAY_PORT_INTERRUPT |
3271 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3272 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3273 if (IS_CHERRYVIEW(dev_priv))
3274 enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3275
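/*
 * irq_mask is ~0 only right after vlv_display_irq_reset(), so this
 * also asserts that the reset preceded this postinstall.
 */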
3276 WARN_ON(dev_priv->irq_mask != ~0);
3277
3278 dev_priv->irq_mask = ~enable_mask;
3279
3280 GEN5_IRQ_INIT(VLV_, dev_priv->irq_mask, enable_mask);
3281 }
3282
3283 /* drm_dma.h hooks
3284 */
3285 static void ironlake_irq_reset(struct drm_device *dev)
3286 {
3287 struct drm_i915_private *dev_priv = to_i915(dev);
3288
3289 I915_WRITE(HWSTAM, 0xffffffff);
3290
3291 GEN5_IRQ_RESET(DE);
3292 if (IS_GEN7(dev))
3293 I915_WRITE(GEN7_ERR_INT, 0xffffffff);
3294
3295 gen5_gt_irq_reset(dev);
3296
3297 ibx_irq_reset(dev);
3298 }
3299
3300 static void valleyview_irq_preinstall(struct drm_device *dev)
3301 {
3302 struct drm_i915_private *dev_priv = to_i915(dev);
3303
3304 I915_WRITE(VLV_MASTER_IER, 0);
3305 POSTING_READ(VLV_MASTER_IER);
3306
3307 gen5_gt_irq_reset(dev);
3308
3309 spin_lock_irq(&dev_priv->irq_lock);
3310 if (dev_priv->display_irqs_enabled)
3311 vlv_display_irq_reset(dev_priv);
3312 spin_unlock_irq(&dev_priv->irq_lock);
3313 }
3314
3315 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3316 {
3317 GEN8_IRQ_RESET_NDX(GT, 0);
3318 GEN8_IRQ_RESET_NDX(GT, 1);
3319 GEN8_IRQ_RESET_NDX(GT, 2);
3320 GEN8_IRQ_RESET_NDX(GT, 3);
3321 }
3322
3323 static void gen8_irq_reset(struct drm_device *dev)
3324 {
3325 struct drm_i915_private *dev_priv = to_i915(dev);
3326 int pipe;
3327
3328 I915_WRITE(GEN8_MASTER_IRQ, 0);
3329 POSTING_READ(GEN8_MASTER_IRQ);
3330
3331 gen8_gt_irq_reset(dev_priv);
3332
3333 for_each_pipe(dev_priv, pipe)
3334 if (intel_display_power_is_enabled(dev_priv,
3335 POWER_DOMAIN_PIPE(pipe)))
3336 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3337
3338 GEN5_IRQ_RESET(GEN8_DE_PORT_);
3339 GEN5_IRQ_RESET(GEN8_DE_MISC_);
3340 GEN5_IRQ_RESET(GEN8_PCU_);
3341
3342 if (HAS_PCH_SPLIT(dev))
3343 ibx_irq_reset(dev);
3344 }
3345
3346 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
3347 unsigned int pipe_mask)
3348 {
3349 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
3350 enum pipe pipe;
3351
3352 spin_lock_irq(&dev_priv->irq_lock);
3353 for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3354 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3355 dev_priv->de_irq_mask[pipe],
3356 ~dev_priv->de_irq_mask[pipe] | extra_ier);
3357 spin_unlock_irq(&dev_priv->irq_lock);
3358 }
3359
3360 void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
3361 unsigned int pipe_mask)
3362 {
3363 enum pipe pipe;
3364
3365 spin_lock_irq(&dev_priv->irq_lock);
3366 for_each_pipe_masked(dev_priv, pipe, pipe_mask)
3367 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3368 spin_unlock_irq(&dev_priv->irq_lock);
3369
3370 /* make sure we're done processing display irqs */
3371 synchronize_irq(dev_priv->drm.irq);
3372 }
3373
3374 static void cherryview_irq_preinstall(struct drm_device *dev)
3375 {
3376 struct drm_i915_private *dev_priv = to_i915(dev);
3377
3378 I915_WRITE(GEN8_MASTER_IRQ, 0);
3379 POSTING_READ(GEN8_MASTER_IRQ);
3380
3381 gen8_gt_irq_reset(dev_priv);
3382
3383 GEN5_IRQ_RESET(GEN8_PCU_);
3384
3385 spin_lock_irq(&dev_priv->irq_lock);
3386 if (dev_priv->display_irqs_enabled)
3387 vlv_display_irq_reset(dev_priv);
3388 spin_unlock_irq(&dev_priv->irq_lock);
3389 }
3390
3391 static u32 intel_hpd_enabled_irqs(struct drm_i915_private *dev_priv,
3392 const u32 hpd[HPD_NUM_PINS])
3393 {
3394 struct intel_encoder *encoder;
3395 u32 enabled_irqs = 0;
3396
3397 for_each_intel_encoder(&dev_priv->drm, encoder)
3398 if (dev_priv->hotplug.stats[encoder->hpd_pin].state == HPD_ENABLED)
3399 enabled_irqs |= hpd[encoder->hpd_pin];
3400
3401 return enabled_irqs;
3402 }
3403
3404 static void ibx_hpd_irq_setup(struct drm_i915_private *dev_priv)
3405 {
3406 u32 hotplug_irqs, hotplug, enabled_irqs;
3407
3408 if (HAS_PCH_IBX(dev_priv)) {
3409 hotplug_irqs = SDE_HOTPLUG_MASK;
3410 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ibx);
3411 } else {
3412 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3413 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_cpt);
3414 }
3415
3416 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3417
3418 /*
3419 * Enable digital hotplug on the PCH, and configure the DP short pulse
3420 * duration to 2ms (which is the minimum in the Display Port spec).
3421 * The pulse duration bits are reserved on LPT+.
3422 */
3423 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3424 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3425 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3426 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3427 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3428 /*
3429 * When CPU and PCH are on the same package, port A
3430 * HPD must be enabled in both north and south.
3431 */
3432 if (HAS_PCH_LPT_LP(dev_priv))
3433 hotplug |= PORTA_HOTPLUG_ENABLE;
3434 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3435 }
3436
3437 static void spt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3438 {
3439 u32 hotplug_irqs, hotplug, enabled_irqs;
3440
3441 hotplug_irqs = SDE_HOTPLUG_MASK_SPT;
3442 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_spt);
3443
3444 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3445
3446 /* Enable digital hotplug on the PCH */
3447 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3448 hotplug |= PORTD_HOTPLUG_ENABLE | PORTC_HOTPLUG_ENABLE |
3449 PORTB_HOTPLUG_ENABLE | PORTA_HOTPLUG_ENABLE;
3450 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3451
3452 hotplug = I915_READ(PCH_PORT_HOTPLUG2);
3453 hotplug |= PORTE_HOTPLUG_ENABLE;
3454 I915_WRITE(PCH_PORT_HOTPLUG2, hotplug);
3455 }
3456
3457 static void ilk_hpd_irq_setup(struct drm_i915_private *dev_priv)
3458 {
3459 u32 hotplug_irqs, hotplug, enabled_irqs;
3460
3461 if (INTEL_GEN(dev_priv) >= 8) {
3462 hotplug_irqs = GEN8_PORT_DP_A_HOTPLUG;
3463 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bdw);
3464
3465 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3466 } else if (INTEL_GEN(dev_priv) >= 7) {
3467 hotplug_irqs = DE_DP_A_HOTPLUG_IVB;
3468 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ivb);
3469
3470 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3471 } else {
3472 hotplug_irqs = DE_DP_A_HOTPLUG;
3473 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_ilk);
3474
3475 ilk_update_display_irq(dev_priv, hotplug_irqs, enabled_irqs);
3476 }
3477
3478 /*
3479 * Enable digital hotplug on the CPU, and configure the DP short pulse
3480 * duration to 2ms (which is the minimum in the Display Port spec)
3481 * The pulse duration bits are reserved on HSW+.
3482 */
3483 hotplug = I915_READ(DIGITAL_PORT_HOTPLUG_CNTRL);
3484 hotplug &= ~DIGITAL_PORTA_PULSE_DURATION_MASK;
3485 hotplug |= DIGITAL_PORTA_HOTPLUG_ENABLE | DIGITAL_PORTA_PULSE_DURATION_2ms;
3486 I915_WRITE(DIGITAL_PORT_HOTPLUG_CNTRL, hotplug);
3487
3488 ibx_hpd_irq_setup(dev_priv);
3489 }
3490
3491 static void bxt_hpd_irq_setup(struct drm_i915_private *dev_priv)
3492 {
3493 u32 hotplug_irqs, hotplug, enabled_irqs;
3494
3495 enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_bxt);
3496 hotplug_irqs = BXT_DE_PORT_HOTPLUG_MASK;
3497
3498 bdw_update_port_irq(dev_priv, hotplug_irqs, enabled_irqs);
3499
3500 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3501 hotplug |= PORTC_HOTPLUG_ENABLE | PORTB_HOTPLUG_ENABLE |
3502 PORTA_HOTPLUG_ENABLE;
3503
3504 DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
3505 hotplug, enabled_irqs);
3506 hotplug &= ~BXT_DDI_HPD_INVERT_MASK;
3507
3508 /*
3509 * On BXT the HPD invert bit has to be set according to the AOB design
3510 * of the HPD detection logic, so update it from the VBT fields.
3511 */
3512
3513 if ((enabled_irqs & BXT_DE_PORT_HP_DDIA) &&
3514 intel_bios_is_port_hpd_inverted(dev_priv, PORT_A))
3515 hotplug |= BXT_DDIA_HPD_INVERT;
3516 if ((enabled_irqs & BXT_DE_PORT_HP_DDIB) &&
3517 intel_bios_is_port_hpd_inverted(dev_priv, PORT_B))
3518 hotplug |= BXT_DDIB_HPD_INVERT;
3519 if ((enabled_irqs & BXT_DE_PORT_HP_DDIC) &&
3520 intel_bios_is_port_hpd_inverted(dev_priv, PORT_C))
3521 hotplug |= BXT_DDIC_HPD_INVERT;
3522
3523 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3524 }
3525
3526 static void ibx_irq_postinstall(struct drm_device *dev)
3527 {
3528 struct drm_i915_private *dev_priv = to_i915(dev);
3529 u32 mask;
3530
3531 if (HAS_PCH_NOP(dev))
3532 return;
3533
3534 if (HAS_PCH_IBX(dev))
3535 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3536 else
3537 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3538
3539 gen5_assert_iir_is_zero(dev_priv, SDEIIR);
3540 I915_WRITE(SDEIMR, ~mask);
3541 }
3542
3543 static void gen5_gt_irq_postinstall(struct drm_device *dev)
3544 {
3545 struct drm_i915_private *dev_priv = to_i915(dev);
3546 u32 pm_irqs, gt_irqs;
3547
3548 pm_irqs = gt_irqs = 0;
3549
3550 dev_priv->gt_irq_mask = ~0;
3551 if (HAS_L3_DPF(dev)) {
3552 /* L3 parity interrupt is always unmasked. */
3553 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
3554 gt_irqs |= GT_PARITY_ERROR(dev);
3555 }
3556
3557 gt_irqs |= GT_RENDER_USER_INTERRUPT;
3558 if (IS_GEN5(dev)) {
3559 gt_irqs |= ILK_BSD_USER_INTERRUPT;
3560 } else {
3561 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3562 }
3563
3564 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
3565
3566 if (INTEL_INFO(dev)->gen >= 6) {
3567 /*
3568 * RPS interrupts will get enabled/disabled on demand when RPS
3569 * itself is enabled/disabled.
3570 */
3571 if (HAS_VEBOX(dev))
3572 pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3573
3574 dev_priv->pm_irq_mask = 0xffffffff;
3575 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
3576 }
3577 }
3578
3579 static int ironlake_irq_postinstall(struct drm_device *dev)
3580 {
3581 struct drm_i915_private *dev_priv = to_i915(dev);
3582 u32 display_mask, extra_mask;
3583
3584 if (INTEL_INFO(dev)->gen >= 7) {
3585 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3586 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3587 DE_PLANEB_FLIP_DONE_IVB |
3588 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
3589 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3590 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB |
3591 DE_DP_A_HOTPLUG_IVB);
3592 } else {
3593 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3594 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
3595 DE_AUX_CHANNEL_A |
3596 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3597 DE_POISON);
3598 extra_mask = (DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3599 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
3600 DE_DP_A_HOTPLUG);
3601 }
3602
3603 dev_priv->irq_mask = ~display_mask;
3604
3605 I915_WRITE(HWSTAM, 0xeffe);
3606
3607 ibx_irq_pre_postinstall(dev);
3608
3609 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3610
3611 gen5_gt_irq_postinstall(dev);
3612
3613 ibx_irq_postinstall(dev);
3614
3615 if (IS_IRONLAKE_M(dev)) {
3616 /* Enable PCU event interrupts
3617 *
3618 * spinlocking not required here for correctness since interrupt
3619 * setup is guaranteed to run in single-threaded context. But we
3620 * need it to make the assert_spin_locked happy. */
3621 spin_lock_irq(&dev_priv->irq_lock);
3622 ilk_enable_display_irq(dev_priv, DE_PCU_EVENT);
3623 spin_unlock_irq(&dev_priv->irq_lock);
3624 }
3625
3626 return 0;
3627 }
3628
3629 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3630 {
3631 assert_spin_locked(&dev_priv->irq_lock);
3632
3633 if (dev_priv->display_irqs_enabled)
3634 return;
3635
3636 dev_priv->display_irqs_enabled = true;
3637
3638 if (intel_irqs_enabled(dev_priv)) {
3639 vlv_display_irq_reset(dev_priv);
3640 vlv_display_irq_postinstall(dev_priv);
3641 }
3642 }
3643
3644 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3645 {
3646 assert_spin_locked(&dev_priv->irq_lock);
3647
3648 if (!dev_priv->display_irqs_enabled)
3649 return;
3650
3651 dev_priv->display_irqs_enabled = false;
3652
3653 if (intel_irqs_enabled(dev_priv))
3654 vlv_display_irq_reset(dev_priv);
3655 }
3656
3658 static int valleyview_irq_postinstall(struct drm_device *dev)
3659 {
3660 struct drm_i915_private *dev_priv = to_i915(dev);
3661
3662 gen5_gt_irq_postinstall(dev);
3663
3664 spin_lock_irq(&dev_priv->irq_lock);
3665 if (dev_priv->display_irqs_enabled)
3666 vlv_display_irq_postinstall(dev_priv);
3667 spin_unlock_irq(&dev_priv->irq_lock);
3668
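/*
 * Flip the master enable last, after the individual sources are set
 * up; the posting read flushes the write so interrupts are live by
 * the time we return.
 */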
3669 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3670 POSTING_READ(VLV_MASTER_IER);
3671
3672 return 0;
3673 }
3674
3675 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3676 {
3677 /* These are interrupts we'll toggle with the ring mask register */
3678 uint32_t gt_interrupts[] = {
3679 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3680 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3681 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3682 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3683 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3684 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3685 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3686 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3687 0,
3688 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3689 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3690 };
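/*
 * Each gt_interrupts[] entry above feeds one of the four GT IRQ banks:
 * 0 = RCS/BCS, 1 = VCS1/VCS2, 2 = PM (left to the RPS code), 3 = VECS.
 */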
3691
3692 if (HAS_L3_DPF(dev_priv))
3693 gt_interrupts[0] |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
3694
3695 dev_priv->pm_irq_mask = 0xffffffff;
3696 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3697 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
3698 /*
3699 * RPS interrupts will get enabled/disabled on demand when RPS itself
3700 * is enabled/disabled.
3701 */
3702 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
3703 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
3704 }
3705
3706 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3707 {
3708 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3709 uint32_t de_pipe_enables;
3710 u32 de_port_masked = GEN8_AUX_CHANNEL_A;
3711 u32 de_port_enables;
3712 u32 de_misc_masked = GEN8_DE_MISC_GSE;
3713 enum pipe pipe;
3714
3715 if (INTEL_INFO(dev_priv)->gen >= 9) {
3716 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
3717 GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
3718 de_port_masked |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
3719 GEN9_AUX_CHANNEL_D;
3720 if (IS_BROXTON(dev_priv))
3721 de_port_masked |= BXT_DE_PORT_GMBUS;
3722 } else {
3723 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
3724 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3725 }
3726
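/*
 * Same masked/enabled split as on ILK: vblank and FIFO underrun are
 * enabled in IER below but stay masked in IMR until unmasked on demand.
 */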
3727 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3728 GEN8_PIPE_FIFO_UNDERRUN;
3729
3730 de_port_enables = de_port_masked;
3731 if (IS_BROXTON(dev_priv))
3732 de_port_enables |= BXT_DE_PORT_HOTPLUG_MASK;
3733 else if (IS_BROADWELL(dev_priv))
3734 de_port_enables |= GEN8_PORT_DP_A_HOTPLUG;
3735
3736 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3737 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3738 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3739
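/*
 * Only pipes whose power well is currently on are programmed here;
 * powered-down pipes are expected to have their IRQ registers set up
 * when their power well is re-enabled.
 */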
3740 for_each_pipe(dev_priv, pipe)
3741 if (intel_display_power_is_enabled(dev_priv,
3742 POWER_DOMAIN_PIPE(pipe)))
3743 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3744 dev_priv->de_irq_mask[pipe],
3745 de_pipe_enables);
3746
3747 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~de_port_masked, de_port_enables);
3748 GEN5_IRQ_INIT(GEN8_DE_MISC_, ~de_misc_masked, de_misc_masked);
3749 }
3750
3751 static int gen8_irq_postinstall(struct drm_device *dev)
3752 {
3753 struct drm_i915_private *dev_priv = to_i915(dev);
3754
3755 if (HAS_PCH_SPLIT(dev))
3756 ibx_irq_pre_postinstall(dev);
3757
3758 gen8_gt_irq_postinstall(dev_priv);
3759 gen8_de_irq_postinstall(dev_priv);
3760
3761 if (HAS_PCH_SPLIT(dev))
3762 ibx_irq_postinstall(dev);
3763
3764 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
3765 POSTING_READ(GEN8_MASTER_IRQ);
3766
3767 return 0;
3768 }
3769
3770 static int cherryview_irq_postinstall(struct drm_device *dev)
3771 {
3772 struct drm_i915_private *dev_priv = to_i915(dev);
3773
3774 gen8_gt_irq_postinstall(dev_priv);
3775
3776 spin_lock_irq(&dev_priv->irq_lock);
3777 if (dev_priv->display_irqs_enabled)
3778 vlv_display_irq_postinstall(dev_priv);
3779 spin_unlock_irq(&dev_priv->irq_lock);
3780
3781 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
3782 POSTING_READ(GEN8_MASTER_IRQ);
3783
3784 return 0;
3785 }
3786
3787 static void gen8_irq_uninstall(struct drm_device *dev)
3788 {
3789 struct drm_i915_private *dev_priv = to_i915(dev);
3790
3791 if (!dev_priv)
3792 return;
3793
3794 gen8_irq_reset(dev);
3795 }
3796
3797 static void valleyview_irq_uninstall(struct drm_device *dev)
3798 {
3799 struct drm_i915_private *dev_priv = to_i915(dev);
3800
3801 if (!dev_priv)
3802 return;
3803
3804 I915_WRITE(VLV_MASTER_IER, 0);
3805 POSTING_READ(VLV_MASTER_IER);
3806
3807 gen5_gt_irq_reset(dev);
3808
3809 I915_WRITE(HWSTAM, 0xffffffff);
3810
3811 spin_lock_irq(&dev_priv->irq_lock);
3812 if (dev_priv->display_irqs_enabled)
3813 vlv_display_irq_reset(dev_priv);
3814 spin_unlock_irq(&dev_priv->irq_lock);
3815 }
3816
3817 static void cherryview_irq_uninstall(struct drm_device *dev)
3818 {
3819 struct drm_i915_private *dev_priv = to_i915(dev);
3820
3821 if (!dev_priv)
3822 return;
3823
3824 I915_WRITE(GEN8_MASTER_IRQ, 0);
3825 POSTING_READ(GEN8_MASTER_IRQ);
3826
3827 gen8_gt_irq_reset(dev_priv);
3828
3829 GEN5_IRQ_RESET(GEN8_PCU_);
3830
3831 spin_lock_irq(&dev_priv->irq_lock);
3832 if (dev_priv->display_irqs_enabled)
3833 vlv_display_irq_reset(dev_priv);
3834 spin_unlock_irq(&dev_priv->irq_lock);
3835 }
3836
3837 static void ironlake_irq_uninstall(struct drm_device *dev)
3838 {
3839 struct drm_i915_private *dev_priv = to_i915(dev);
3840
3841 if (!dev_priv)
3842 return;
3843
3844 ironlake_irq_reset(dev);
3845 }
3846
3847 static void i8xx_irq_preinstall(struct drm_device *dev)
3848 {
3849 struct drm_i915_private *dev_priv = to_i915(dev);
3850 int pipe;
3851
3852 for_each_pipe(dev_priv, pipe)
3853 I915_WRITE(PIPESTAT(pipe), 0);
3854 I915_WRITE16(IMR, 0xffff);
3855 I915_WRITE16(IER, 0x0);
3856 POSTING_READ16(IER);
3857 }
3858
3859 static int i8xx_irq_postinstall(struct drm_device *dev)
3860 {
3861 struct drm_i915_private *dev_priv = to_i915(dev);
3862
3863 I915_WRITE16(EMR,
3864 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3865
3866 /* Unmask the interrupts that we always want on. */
3867 dev_priv->irq_mask =
3868 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3869 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3870 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3871 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
3872 I915_WRITE16(IMR, dev_priv->irq_mask);
3873
3874 I915_WRITE16(IER,
3875 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3876 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3877 I915_USER_INTERRUPT);
3878 POSTING_READ16(IER);
3879
3880 /* Interrupt setup is already guaranteed to be single-threaded; this is
3881 * just to make the assert_spin_locked check happy. */
3882 spin_lock_irq(&dev_priv->irq_lock);
3883 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3884 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3885 spin_unlock_irq(&dev_priv->irq_lock);
3886
3887 return 0;
3888 }
3889
3890 /*
3891 * Returns true when a page flip has completed.
3892 */
3893 static bool i8xx_handle_vblank(struct drm_i915_private *dev_priv,
3894 int plane, int pipe, u32 iir)
3895 {
3896 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3897
3898 if (!intel_pipe_handle_vblank(dev_priv, pipe))
3899 return false;
3900
3901 if ((iir & flip_pending) == 0)
3902 goto check_page_flip;
3903
3904 /* We detect FlipDone by looking for the change in PendingFlip from '1'
3905 * to '0' on the following vblank, i.e. IIR has the Pendingflip
3906 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3907 * the flip is completed (no longer pending). Since this doesn't raise
3908 * an interrupt per se, we watch for the change at vblank.
3909 */
3910 if (I915_READ16(ISR) & flip_pending)
3911 goto check_page_flip;
3912
3913 intel_finish_page_flip_cs(dev_priv, pipe);
3914 return true;
3915
3916 check_page_flip:
3917 intel_check_page_flip(dev_priv, pipe);
3918 return false;
3919 }
3920
3921 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3922 {
3923 struct drm_device *dev = arg;
3924 struct drm_i915_private *dev_priv = to_i915(dev);
3925 u16 iir, new_iir;
3926 u32 pipe_stats[2];
3927 int pipe;
3928 u16 flip_mask =
3929 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3930 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3931 irqreturn_t ret;
3932
3933 if (!intel_irqs_enabled(dev_priv))
3934 return IRQ_NONE;
3935
3936 /* IRQs are synced during runtime_suspend, so we don't require a wakeref */
3937 disable_rpm_wakeref_asserts(dev_priv);
3938
3939 ret = IRQ_NONE;
3940 iir = I915_READ16(IIR);
3941 if (iir == 0)
3942 goto out;
3943
3944 while (iir & ~flip_mask) {
3945 /* Can't rely on pipestat interrupt bit in iir as it might
3946 * have been cleared after the pipestat interrupt was received.
3947 * It doesn't set the bit in iir again, but it still produces
3948 * interrupts (for non-MSI).
3949 */
3950 spin_lock(&dev_priv->irq_lock);
3951 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3952 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3953
3954 for_each_pipe(dev_priv, pipe) {
3955 i915_reg_t reg = PIPESTAT(pipe);
3956 pipe_stats[pipe] = I915_READ(reg);
3957
3958 /*
3959 * Clear the PIPE*STAT regs before the IIR
3960 */
3961 if (pipe_stats[pipe] & 0x8000ffff)
3962 I915_WRITE(reg, pipe_stats[pipe]);
3963 }
3964 spin_unlock(&dev_priv->irq_lock);
3965
3966 I915_WRITE16(IIR, iir & ~flip_mask);
3967 new_iir = I915_READ16(IIR); /* Flush posted writes */
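/*
 * See the MSI note in i915_irq_handler() below for why IIR is re-read
 * and the loop only exits once it settles.
 */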
3968
3969 if (iir & I915_USER_INTERRUPT)
3970 notify_ring(&dev_priv->engine[RCS]);
3971
3972 for_each_pipe(dev_priv, pipe) {
3973 int plane = pipe;
3974 if (HAS_FBC(dev_priv))
3975 plane = !plane;
3976
3977 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3978 i8xx_handle_vblank(dev_priv, plane, pipe, iir))
3979 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
3980
3981 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3982 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
3983
3984 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3985 intel_cpu_fifo_underrun_irq_handler(dev_priv,
3986 pipe);
3987 }
3988
3989 iir = new_iir;
3990 }
3991 ret = IRQ_HANDLED;
3992
3993 out:
3994 enable_rpm_wakeref_asserts(dev_priv);
3995
3996 return ret;
3997 }
3998
3999 static void i8xx_irq_uninstall(struct drm_device *dev)
4000 {
4001 struct drm_i915_private *dev_priv = to_i915(dev);
4002 int pipe;
4003
4004 for_each_pipe(dev_priv, pipe) {
4005 /* Clear enable bits; then clear status bits */
4006 I915_WRITE(PIPESTAT(pipe), 0);
4007 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4008 }
4009 I915_WRITE16(IMR, 0xffff);
4010 I915_WRITE16(IER, 0x0);
4011 I915_WRITE16(IIR, I915_READ16(IIR));
4012 }
4013
4014 static void i915_irq_preinstall(struct drm_device *dev)
4015 {
4016 struct drm_i915_private *dev_priv = to_i915(dev);
4017 int pipe;
4018
4019 if (I915_HAS_HOTPLUG(dev)) {
4020 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4021 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4022 }
4023
4024 I915_WRITE16(HWSTAM, 0xeffe);
4025 for_each_pipe(dev_priv, pipe)
4026 I915_WRITE(PIPESTAT(pipe), 0);
4027 I915_WRITE(IMR, 0xffffffff);
4028 I915_WRITE(IER, 0x0);
4029 POSTING_READ(IER);
4030 }
4031
4032 static int i915_irq_postinstall(struct drm_device *dev)
4033 {
4034 struct drm_i915_private *dev_priv = to_i915(dev);
4035 u32 enable_mask;
4036
4037 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
4038
4039 /* Unmask the interrupts that we always want on. */
4040 dev_priv->irq_mask =
4041 ~(I915_ASLE_INTERRUPT |
4042 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4043 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4044 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4045 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
4046
4047 enable_mask =
4048 I915_ASLE_INTERRUPT |
4049 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4050 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4051 I915_USER_INTERRUPT;
4052
4053 if (I915_HAS_HOTPLUG(dev)) {
4054 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4055 POSTING_READ(PORT_HOTPLUG_EN);
4056
4057 /* Enable in IER... */
4058 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
4059 /* and unmask in IMR */
4060 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
4061 }
4062
4063 I915_WRITE(IMR, dev_priv->irq_mask);
4064 I915_WRITE(IER, enable_mask);
4065 POSTING_READ(IER);
4066
4067 i915_enable_asle_pipestat(dev_priv);
4068
4069 /* Interrupt setup is already guaranteed to be single-threaded; this is
4070 * just to make the assert_spin_locked check happy. */
4071 spin_lock_irq(&dev_priv->irq_lock);
4072 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4073 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4074 spin_unlock_irq(&dev_priv->irq_lock);
4075
4076 return 0;
4077 }
4078
4079 /*
4080 * Returns true when a page flip has completed.
4081 */
4082 static bool i915_handle_vblank(struct drm_i915_private *dev_priv,
4083 int plane, int pipe, u32 iir)
4084 {
4085 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
4086
4087 if (!intel_pipe_handle_vblank(dev_priv, pipe))
4088 return false;
4089
4090 if ((iir & flip_pending) == 0)
4091 goto check_page_flip;
4092
4093 /* We detect FlipDone by looking for the change in PendingFlip from '1'
4094 * to '0' on the following vblank, i.e. IIR has the Pendingflip
4095 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
4096 * the flip is completed (no longer pending). Since this doesn't raise
4097 * an interrupt per se, we watch for the change at vblank.
4098 */
4099 if (I915_READ(ISR) & flip_pending)
4100 goto check_page_flip;
4101
4102 intel_finish_page_flip_cs(dev_priv, pipe);
4103 return true;
4104
4105 check_page_flip:
4106 intel_check_page_flip(dev_priv, pipe);
4107 return false;
4108 }
4109
4110 static irqreturn_t i915_irq_handler(int irq, void *arg)
4111 {
4112 struct drm_device *dev = arg;
4113 struct drm_i915_private *dev_priv = to_i915(dev);
4114 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
4115 u32 flip_mask =
4116 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4117 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4118 int pipe, ret = IRQ_NONE;
4119
4120 if (!intel_irqs_enabled(dev_priv))
4121 return IRQ_NONE;
4122
4123 /* IRQs are synced during runtime_suspend, so we don't require a wakeref */
4124 disable_rpm_wakeref_asserts(dev_priv);
4125
4126 iir = I915_READ(IIR);
4127 do {
4128 bool irq_received = (iir & ~flip_mask) != 0;
4129 bool blc_event = false;
4130
4131 /* Can't rely on pipestat interrupt bit in iir as it might
4132 * have been cleared after the pipestat interrupt was received.
4133 * It doesn't set the bit in iir again, but it still produces
4134 * interrupts (for non-MSI).
4135 */
4136 spin_lock(&dev_priv->irq_lock);
4137 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4138 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4139
4140 for_each_pipe(dev_priv, pipe) {
4141 i915_reg_t reg = PIPESTAT(pipe);
4142 pipe_stats[pipe] = I915_READ(reg);
4143
4144 /* Clear the PIPE*STAT regs before the IIR */
4145 if (pipe_stats[pipe] & 0x8000ffff) {
4146 I915_WRITE(reg, pipe_stats[pipe]);
4147 irq_received = true;
4148 }
4149 }
4150 spin_unlock(&dev_priv->irq_lock);
4151
4152 if (!irq_received)
4153 break;
4154
4155 /* Consume port. Then clear IIR or we'll miss events */
4156 if (I915_HAS_HOTPLUG(dev_priv) &&
4157 iir & I915_DISPLAY_PORT_INTERRUPT) {
4158 u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4159 if (hotplug_status)
4160 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4161 }
4162
4163 I915_WRITE(IIR, iir & ~flip_mask);
4164 new_iir = I915_READ(IIR); /* Flush posted writes */
4165
4166 if (iir & I915_USER_INTERRUPT)
4167 notify_ring(&dev_priv->engine[RCS]);
4168
4169 for_each_pipe(dev_priv, pipe) {
4170 int plane = pipe;
4171 if (HAS_FBC(dev_priv))
4172 plane = !plane;
4173
4174 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4175 i915_handle_vblank(dev_priv, plane, pipe, iir))
4176 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4177
4178 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4179 blc_event = true;
4180
4181 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4182 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
4183
4184 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4185 intel_cpu_fifo_underrun_irq_handler(dev_priv,
4186 pipe);
4187 }
4188
4189 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4190 intel_opregion_asle_intr(dev_priv);
4191
4192 /* With MSI, interrupts are only generated when iir
4193 * transitions from zero to nonzero. If another bit got
4194 * set while we were handling the existing iir bits, then
4195 * we would never get another interrupt.
4196 *
4197 * This is fine on non-MSI as well, as if we hit this path
4198 * we avoid exiting the interrupt handler only to generate
4199 * another one.
4200 *
4201 * Note that for MSI this could cause a stray interrupt report
4202 * if an interrupt landed in the time between writing IIR and
4203 * the posting read. This should be rare enough to never
4204 * trigger the 99% of 100,000 interrupts test for disabling
4205 * stray interrupts.
4206 */
4207 ret = IRQ_HANDLED;
4208 iir = new_iir;
4209 } while (iir & ~flip_mask);
4210
4211 enable_rpm_wakeref_asserts(dev_priv);
4212
4213 return ret;
4214 }
4215
4216 static void i915_irq_uninstall(struct drm_device *dev)
4217 {
4218 struct drm_i915_private *dev_priv = to_i915(dev);
4219 int pipe;
4220
4221 if (I915_HAS_HOTPLUG(dev)) {
4222 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4223 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4224 }
4225
4226 I915_WRITE16(HWSTAM, 0xffff);
4227 for_each_pipe(dev_priv, pipe) {
4228 /* Clear enable bits; then clear status bits */
4229 I915_WRITE(PIPESTAT(pipe), 0);
4230 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4231 }
4232 I915_WRITE(IMR, 0xffffffff);
4233 I915_WRITE(IER, 0x0);
4234
4235 I915_WRITE(IIR, I915_READ(IIR));
4236 }
4237
4238 static void i965_irq_preinstall(struct drm_device *dev)
4239 {
4240 struct drm_i915_private *dev_priv = to_i915(dev);
4241 int pipe;
4242
4243 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4244 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4245
4246 I915_WRITE(HWSTAM, 0xeffe);
4247 for_each_pipe(dev_priv, pipe)
4248 I915_WRITE(PIPESTAT(pipe), 0);
4249 I915_WRITE(IMR, 0xffffffff);
4250 I915_WRITE(IER, 0x0);
4251 POSTING_READ(IER);
4252 }
4253
4254 static int i965_irq_postinstall(struct drm_device *dev)
4255 {
4256 struct drm_i915_private *dev_priv = to_i915(dev);
4257 u32 enable_mask;
4258 u32 error_mask;
4259
4260 /* Unmask the interrupts that we always want on. */
4261 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
4262 I915_DISPLAY_PORT_INTERRUPT |
4263 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4264 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4265 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4266 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4267 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4268
4269 enable_mask = ~dev_priv->irq_mask;
4270 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4271 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
4272 enable_mask |= I915_USER_INTERRUPT;
4273
4274 if (IS_G4X(dev_priv))
4275 enable_mask |= I915_BSD_USER_INTERRUPT;
4276
4277 /* Interrupt setup is already guaranteed to be single-threaded; this is
4278 * just to make the assert_spin_locked check happy. */
4279 spin_lock_irq(&dev_priv->irq_lock);
4280 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4281 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4282 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4283 spin_unlock_irq(&dev_priv->irq_lock);
4284
4285 /*
4286 * Enable some error detection, note the instruction error mask
4287 * bit is reserved, so we leave it masked.
4288 */
4289 if (IS_G4X(dev_priv)) {
4290 error_mask = ~(GM45_ERROR_PAGE_TABLE |
4291 GM45_ERROR_MEM_PRIV |
4292 GM45_ERROR_CP_PRIV |
4293 I915_ERROR_MEMORY_REFRESH);
4294 } else {
4295 error_mask = ~(I915_ERROR_PAGE_TABLE |
4296 I915_ERROR_MEMORY_REFRESH);
4297 }
4298 I915_WRITE(EMR, error_mask);
4299
4300 I915_WRITE(IMR, dev_priv->irq_mask);
4301 I915_WRITE(IER, enable_mask);
4302 POSTING_READ(IER);
4303
4304 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4305 POSTING_READ(PORT_HOTPLUG_EN);
4306
4307 i915_enable_asle_pipestat(dev_priv);
4308
4309 return 0;
4310 }
4311
4312 static void i915_hpd_irq_setup(struct drm_i915_private *dev_priv)
4313 {
4314 u32 hotplug_en;
4315
4316 assert_spin_locked(&dev_priv->irq_lock);
4317
4318 /* Note HDMI and DP share hotplug bits, and the enable bits are
4319 * the same for all generations. */
4320 hotplug_en = intel_hpd_enabled_irqs(dev_priv, hpd_mask_i915);
4321 /* Programming the CRT detection parameters tends to generate a
4322 * spurious hotplug event about three seconds later. So just do it
4323 * once.
4324 */
4325 if (IS_G4X(dev_priv))
4326 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4327 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4328
4329 /* Ignore TV since it's buggy */
4330 i915_hotplug_interrupt_update_locked(dev_priv,
4331 HOTPLUG_INT_EN_MASK |
4332 CRT_HOTPLUG_VOLTAGE_COMPARE_MASK |
4333 CRT_HOTPLUG_ACTIVATION_PERIOD_64,
4334 hotplug_en);
4335 }
4336
4337 static irqreturn_t i965_irq_handler(int irq, void *arg)
4338 {
4339 struct drm_device *dev = arg;
4340 struct drm_i915_private *dev_priv = to_i915(dev);
4341 u32 iir, new_iir;
4342 u32 pipe_stats[I915_MAX_PIPES];
4343 int ret = IRQ_NONE, pipe;
4344 u32 flip_mask =
4345 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4346 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4347
4348 if (!intel_irqs_enabled(dev_priv))
4349 return IRQ_NONE;
4350
4351 /* IRQs are synced during runtime_suspend, so we don't require a wakeref */
4352 disable_rpm_wakeref_asserts(dev_priv);
4353
4354 iir = I915_READ(IIR);
4355
4356 for (;;) {
4357 bool irq_received = (iir & ~flip_mask) != 0;
4358 bool blc_event = false;
4359
4360 /* Can't rely on pipestat interrupt bit in iir as it might
4361 * have been cleared after the pipestat interrupt was received.
4362 * It doesn't set the bit in iir again, but it still produces
4363 * interrupts (for non-MSI).
4364 */
4365 spin_lock(&dev_priv->irq_lock);
4366 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4367 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4368
4369 for_each_pipe(dev_priv, pipe) {
4370 i915_reg_t reg = PIPESTAT(pipe);
4371 pipe_stats[pipe] = I915_READ(reg);
4372
4373 /*
4374 * Clear the PIPE*STAT regs before the IIR
4375 */
4376 if (pipe_stats[pipe] & 0x8000ffff) {
4377 I915_WRITE(reg, pipe_stats[pipe]);
4378 irq_received = true;
4379 }
4380 }
4381 spin_unlock(&dev_priv->irq_lock);
4382
4383 if (!irq_received)
4384 break;
4385
4386 ret = IRQ_HANDLED;
4387
4388 /* Consume port. Then clear IIR or we'll miss events */
4389 if (iir & I915_DISPLAY_PORT_INTERRUPT) {
4390 u32 hotplug_status = i9xx_hpd_irq_ack(dev_priv);
4391 if (hotplug_status)
4392 i9xx_hpd_irq_handler(dev_priv, hotplug_status);
4393 }
4394
4395 I915_WRITE(IIR, iir & ~flip_mask);
4396 new_iir = I915_READ(IIR); /* Flush posted writes */
4397
4398 if (iir & I915_USER_INTERRUPT)
4399 notify_ring(&dev_priv->engine[RCS]);
4400 if (iir & I915_BSD_USER_INTERRUPT)
4401 notify_ring(&dev_priv->engine[VCS]);
4402
4403 for_each_pipe(dev_priv, pipe) {
4404 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
4405 i915_handle_vblank(dev_priv, pipe, pipe, iir))
4406 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
4407
4408 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4409 blc_event = true;
4410
4411 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4412 i9xx_pipe_crc_irq_handler(dev_priv, pipe);
4413
4414 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4415 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
4416 }
4417
4418 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4419 intel_opregion_asle_intr(dev_priv);
4420
4421 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
4422 gmbus_irq_handler(dev_priv);
4423
4424 /* With MSI, interrupts are only generated when iir
4425 * transitions from zero to nonzero. If another bit got
4426 * set while we were handling the existing iir bits, then
4427 * we would never get another interrupt.
4428 *
4429 * This is fine on non-MSI as well, as if we hit this path
4430 * we avoid exiting the interrupt handler only to generate
4431 * another one.
4432 *
4433 * Note that for MSI this could cause a stray interrupt report
4434 * if an interrupt landed in the time between writing IIR and
4435 * the posting read. This should be rare enough to never
4436 * trigger the 99% of 100,000 interrupts test for disabling
4437 * stray interrupts.
4438 */
4439 iir = new_iir;
4440 }
4441
4442 enable_rpm_wakeref_asserts(dev_priv);
4443
4444 return ret;
4445 }
4446
4447 static void i965_irq_uninstall(struct drm_device *dev)
4448 {
4449 struct drm_i915_private *dev_priv = to_i915(dev);
4450 int pipe;
4451
4452 if (!dev_priv)
4453 return;
4454
4455 i915_hotplug_interrupt_update(dev_priv, 0xffffffff, 0);
4456 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4457
4458 I915_WRITE(HWSTAM, 0xffffffff);
4459 for_each_pipe(dev_priv, pipe)
4460 I915_WRITE(PIPESTAT(pipe), 0);
4461 I915_WRITE(IMR, 0xffffffff);
4462 I915_WRITE(IER, 0x0);
4463
4464 for_each_pipe(dev_priv, pipe)
4465 I915_WRITE(PIPESTAT(pipe),
4466 I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
4467 I915_WRITE(IIR, I915_READ(IIR));
4468 }
4469
4470 /**
4471 * intel_irq_init - initializes irq support
4472 * @dev_priv: i915 device instance
4473 *
4474 * This function initializes all the irq support including work items, timers
4475 * and all the vtables. It does not setup the interrupt itself though.
4476 */
4477 void intel_irq_init(struct drm_i915_private *dev_priv)
4478 {
4479 struct drm_device *dev = &dev_priv->drm;
4480
4481 intel_hpd_init_work(dev_priv);
4482
4483 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
4484 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4485
4486 /* Let's track the enabled rps events */
4487 if (IS_VALLEYVIEW(dev_priv))
4488 /* WaGsvRC0ResidencyMethod:vlv */
4489 dev_priv->pm_rps_events = GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED;
4490 else
4491 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4492
4493 dev_priv->rps.pm_intr_keep = 0;
4494
4495 /*
4496 * SNB and IVB can hard hang, and VLV and CHV may hard hang, on a
4497 * looping batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
4498 *
4499 * TODO: verify if this can be reproduced on VLV,CHV.
4500 */
4501 if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
4502 dev_priv->rps.pm_intr_keep |= GEN6_PM_RP_UP_EI_EXPIRED;
4503
4504 if (INTEL_INFO(dev_priv)->gen >= 8)
4505 dev_priv->rps.pm_intr_keep |= GEN8_PMINTR_REDIRECT_TO_NON_DISP;
4506
4507 INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
4508 i915_hangcheck_elapsed);
4509
4510 if (IS_GEN2(dev_priv)) {
4511 /* Gen2 doesn't have a hardware frame counter */
4512 dev->max_vblank_count = 0;
4513 dev->driver->get_vblank_counter = drm_vblank_no_hw_counter;
4514 } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
4515 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4516 dev->driver->get_vblank_counter = g4x_get_vblank_counter;
4517 } else {
4518 dev->driver->get_vblank_counter = i915_get_vblank_counter;
4519 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
4520 }
4521
4522 /*
4523 * Opt out of the vblank disable timer on everything except gen2.
4524 * Gen2 doesn't have a hardware frame counter and so depends on
4525 * vblank interrupts to produce sane vblank sequence numbers.
4526 */
4527 if (!IS_GEN2(dev_priv))
4528 dev->vblank_disable_immediate = true;
4529
4530 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
4531 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4532
4533 if (IS_CHERRYVIEW(dev_priv)) {
4534 dev->driver->irq_handler = cherryview_irq_handler;
4535 dev->driver->irq_preinstall = cherryview_irq_preinstall;
4536 dev->driver->irq_postinstall = cherryview_irq_postinstall;
4537 dev->driver->irq_uninstall = cherryview_irq_uninstall;
4538 dev->driver->enable_vblank = valleyview_enable_vblank;
4539 dev->driver->disable_vblank = valleyview_disable_vblank;
4540 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4541 } else if (IS_VALLEYVIEW(dev_priv)) {
4542 dev->driver->irq_handler = valleyview_irq_handler;
4543 dev->driver->irq_preinstall = valleyview_irq_preinstall;
4544 dev->driver->irq_postinstall = valleyview_irq_postinstall;
4545 dev->driver->irq_uninstall = valleyview_irq_uninstall;
4546 dev->driver->enable_vblank = valleyview_enable_vblank;
4547 dev->driver->disable_vblank = valleyview_disable_vblank;
4548 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4549 } else if (INTEL_INFO(dev_priv)->gen >= 8) {
4550 dev->driver->irq_handler = gen8_irq_handler;
4551 dev->driver->irq_preinstall = gen8_irq_reset;
4552 dev->driver->irq_postinstall = gen8_irq_postinstall;
4553 dev->driver->irq_uninstall = gen8_irq_uninstall;
4554 dev->driver->enable_vblank = gen8_enable_vblank;
4555 dev->driver->disable_vblank = gen8_disable_vblank;
4556 if (IS_BROXTON(dev))
4557 dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
4558 else if (HAS_PCH_SPT(dev) || HAS_PCH_KBP(dev))
4559 dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
4560 else
4561 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4562 } else if (HAS_PCH_SPLIT(dev)) {
4563 dev->driver->irq_handler = ironlake_irq_handler;
4564 dev->driver->irq_preinstall = ironlake_irq_reset;
4565 dev->driver->irq_postinstall = ironlake_irq_postinstall;
4566 dev->driver->irq_uninstall = ironlake_irq_uninstall;
4567 dev->driver->enable_vblank = ironlake_enable_vblank;
4568 dev->driver->disable_vblank = ironlake_disable_vblank;
4569 dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
4570 } else {
4571 if (IS_GEN2(dev_priv)) {
4572 dev->driver->irq_preinstall = i8xx_irq_preinstall;
4573 dev->driver->irq_postinstall = i8xx_irq_postinstall;
4574 dev->driver->irq_handler = i8xx_irq_handler;
4575 dev->driver->irq_uninstall = i8xx_irq_uninstall;
4576 } else if (IS_GEN3(dev_priv)) {
4577 dev->driver->irq_preinstall = i915_irq_preinstall;
4578 dev->driver->irq_postinstall = i915_irq_postinstall;
4579 dev->driver->irq_uninstall = i915_irq_uninstall;
4580 dev->driver->irq_handler = i915_irq_handler;
4581 } else {
4582 dev->driver->irq_preinstall = i965_irq_preinstall;
4583 dev->driver->irq_postinstall = i965_irq_postinstall;
4584 dev->driver->irq_uninstall = i965_irq_uninstall;
4585 dev->driver->irq_handler = i965_irq_handler;
4586 }
4587 if (I915_HAS_HOTPLUG(dev_priv))
4588 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4589 dev->driver->enable_vblank = i915_enable_vblank;
4590 dev->driver->disable_vblank = i915_disable_vblank;
4591 }
4592 }
4593
4594 /**
4595 * intel_irq_install - enables the hardware interrupt
4596 * @dev_priv: i915 device instance
4597 *
4598 * This function enables the hardware interrupt handling, but leaves the hotplug
4599 * handling still disabled. It is called after intel_irq_init().
4600 *
4601 * In the driver load and resume code we need working interrupts in a few places
4602 * but don't want to deal with the hassle of concurrent probe and hotplug
4603 * workers. Hence the split into this two-stage approach.
4604 */
4605 int intel_irq_install(struct drm_i915_private *dev_priv)
4606 {
4607 /*
4608 * We enable some interrupt sources in our postinstall hooks, so mark
4609 * interrupts as enabled _before_ actually enabling them to avoid
4610 * special cases in our ordering checks.
4611 */
4612 dev_priv->pm.irqs_enabled = true;
4613
4614 return drm_irq_install(&dev_priv->drm, dev_priv->drm.pdev->irq);
4615 }
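/*
 * A minimal sketch of the intended ordering during driver load
 * (probe context assumed, error handling elided):
 *
 *	intel_irq_init(dev_priv);	(vtables, work items, timers)
 *	ret = intel_irq_install(dev_priv);
 *	...
 *	intel_irq_uninstall(dev_priv);	(on unload)
 */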
4616
4617 /**
4618 * intel_irq_uninstall - finalizes all irq handling
4619 * @dev_priv: i915 device instance
4620 *
4621 * This stops interrupt and hotplug handling and unregisters and frees all
4622 * resources acquired in the init functions.
4623 */
4624 void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4625 {
4626 drm_irq_uninstall(&dev_priv->drm);
4627 intel_hpd_cancel_work(dev_priv);
4628 dev_priv->pm.irqs_enabled = false;
4629 }
4630
4631 /**
4632 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4633 * @dev_priv: i915 device instance
4634 *
4635 * This function is used to disable interrupts at runtime, both in the runtime
4636 * pm and the system suspend/resume code.
4637 */
4638 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4639 {
4640 dev_priv->drm.driver->irq_uninstall(&dev_priv->drm);
4641 dev_priv->pm.irqs_enabled = false;
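/* Make sure no interrupt handler is still running once we return. */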
4642 synchronize_irq(dev_priv->drm.irq);
4643 }
4644
4645 /**
4646 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4647 * @dev_priv: i915 device instance
4648 *
4649 * This function is used to enable interrupts at runtime, both in the runtime
4650 * pm and the system suspend/resume code.
4651 */
4652 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4653 {
4654 dev_priv->pm.irqs_enabled = true;
4655 dev_priv->drm.driver->irq_preinstall(&dev_priv->drm);
4656 dev_priv->drm.driver->irq_postinstall(&dev_priv->drm);
4657 }