/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

static const u32 hpd_ibx[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[HPD_NUM_PINS] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[HPD_NUM_PINS] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
	u32 val = I915_READ(reg); \
	if (val) { \
		WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
		     (reg), val); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
	} \
} while (0)

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)
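
/*
 * A minimal usage sketch of the reset/init pair above (illustrative only;
 * the real callers are the per-gen preinstall/postinstall hooks later in
 * this file). "DE" selects the DEIMR/DEIER/DEIIR display engine registers
 * via token pasting; display_mask and extra_mask are placeholder names:
 *
 *	GEN5_IRQ_RESET(DE);
 *	...
 *	GEN5_IRQ_INIT(DE, ~display_mask, display_mask | extra_mask);
 */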

static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}
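
/*
 * Illustrative only: both helpers above must be called with
 * dev_priv->irq_lock held (ilk_update_gt_irq asserts this), e.g.:
 *
 *	spin_lock_irq(&dev_priv->irq_lock);
 *	gen5_enable_gt_irq(dev_priv, GT_RENDER_USER_INTERRUPT);
 *	spin_unlock_irq(&dev_priv->irq_lock);
 */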

static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}

static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
				  uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_disable_pm_irq(dev_priv, mask);
}

void gen6_reset_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t reg = gen6_pm_iir(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	POSTING_READ(reg);
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);

	WARN_ON(dev_priv->rps.pm_iir);
	WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
	dev_priv->rps.interrupts_enabled = true;
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
		   dev_priv->pm_rps_events);
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}

u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
{
	/*
	 * SNB and IVB can hard hang, and VLV/CHV may hard hang, on a
	 * looping batchbuffer if GEN6_PM_UP_EI_EXPIRED is masked.
	 *
	 * TODO: verify if this can be reproduced on VLV,CHV.
	 */
	if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
		mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;

	if (INTEL_INFO(dev_priv)->gen >= 8)
		mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;

	return mask;
}
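
/*
 * For reference: gen6_disable_rps_interrupts() below feeds ~0 through this
 * helper when writing GEN6_PMINTRMSK, so that everything is masked except
 * the bits the hardware cannot tolerate having masked.
 */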

void gen6_disable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.interrupts_enabled = false;
	spin_unlock_irq(&dev_priv->irq_lock);

	cancel_work_sync(&dev_priv->rps.work);

	spin_lock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));

	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
		   ~dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);

	dev_priv->rps.pm_iir = 0;

	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
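
/*
 * Convenience wrappers exist around this helper (defined in i915_drv.h in
 * this era of the driver; mentioned here as a hint, check the header before
 * relying on them):
 *
 *	ibx_enable_display_interrupt(dev_priv, bits)
 *		== ibx_display_interrupt_update(dev_priv, bits, bits)
 *	ibx_disable_display_interrupt(dev_priv, bits)
 *		== ibx_display_interrupt_update(dev_priv, bits, 0)
 */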

static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipes B and C the same bit MBZ (must be zero).
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipes B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}

void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 * @dev: drm device
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *	vblank_start >= 3
 *	vsync_start = vblank_start + 1
 *	vsync_end = vblank_start + 2
 *	vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       .   \hs/   .      \hs/          \hs/          \hs/   .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct intel_crtc *intel_crtc =
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		const struct drm_display_mode *mode =
			&intel_crtc->config->base.adjusted_mode;

		htotal = mode->crtc_htotal;
		hsync_start = mode->crtc_hsync_start;
		vbl_start = mode->crtc_vblank_start;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
	} else {
		enum transcoder cpu_transcoder = (enum transcoder) pipe;

		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
		hsync_start = (I915_READ(HSYNC(cpu_transcoder)) & 0x1fff) + 1;
		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
		if ((I915_READ(PIPECONF(cpu_transcoder)) &
		     PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE)
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
	}

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
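
/*
 * Worked example with made-up numbers: if high1 = 0x0102, low = 0x34 and
 * the pixel counter is already at or past vbl_start, the cooked-up vblank
 * counter is ((0x0102 << 8) | 0x34) + 1 = 0x010235, truncated to 24 bits.
 */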

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))

static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *mode = &crtc->config->base.adjusted_mode;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev))
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *mode = &intel_crtc->config->base.adjusted_mode;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (!intel_crtc->active) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_IN_VBLANK flag. In order to fix
		 * that, always add htotal-hsync_start to the current pixel
		 * position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up from vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	return ret;
}

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc,
						     &to_intel_crtc(crtc)->config->base.adjusted_mode);
}

static bool intel_hpd_irq_event(struct drm_device *dev,
				struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	if (old_status == connector->status)
		return false;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
		      connector->base.id,
		      connector->name,
		      drm_get_connector_status_name(old_status),
		      drm_get_connector_status_name(connector->status));

	return true;
}

static void i915_digport_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, dig_port_work);
	u32 long_port_mask, short_port_mask;
	struct intel_digital_port *intel_dig_port;
	int i;
	u32 old_bits = 0;

	spin_lock_irq(&dev_priv->irq_lock);
	long_port_mask = dev_priv->long_hpd_port_mask;
	dev_priv->long_hpd_port_mask = 0;
	short_port_mask = dev_priv->short_hpd_port_mask;
	dev_priv->short_hpd_port_mask = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	for (i = 0; i < I915_MAX_PORTS; i++) {
		bool valid = false;
		bool long_hpd = false;
		intel_dig_port = dev_priv->hpd_irq_port[i];
		if (!intel_dig_port || !intel_dig_port->hpd_pulse)
			continue;

		if (long_port_mask & (1 << i)) {
			valid = true;
			long_hpd = true;
		} else if (short_port_mask & (1 << i))
			valid = true;

		if (valid) {
			enum irqreturn ret;

			ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
			if (ret == IRQ_NONE) {
				/* fall back to old school hpd */
				old_bits |= (1 << intel_dig_port->base.hpd_pin);
			}
		}
	}

	if (old_bits) {
		spin_lock_irq(&dev_priv->irq_lock);
		dev_priv->hpd_event_bits |= old_bits;
		spin_unlock_irq(&dev_priv->irq_lock);
		schedule_work(&dev_priv->hotplug_work);
	}
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000) /* 2 min in ms */

static void i915_hotplug_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irq(&dev_priv->irq_lock);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		if (!intel_connector->encoder)
			continue;
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 connector->name);
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      connector->name, intel_encoder->hpd_pin);
		}
	}
	/*
	 * If there were no outputs to poll, polling was disabled; make sure
	 * it gets re-enabled now that we're disabling HPD on some connectors.
	 */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work,
				 msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irq(&dev_priv->irq_lock);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		if (!intel_connector->encoder)
			continue;
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}

static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}

static void notify_ring(struct drm_device *dev,
			struct intel_engine_cs *ring)
{
	if (!intel_ring_initialized(ring))
		return;

	trace_i915_gem_request_notify(ring);

	wake_up_all(&ring->irq_queue);
}

static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
			    struct intel_rps_ei *rps_ei)
{
	u32 cz_ts, cz_freq_khz;
	u32 render_count, media_count;
	u32 elapsed_render, elapsed_media, elapsed_time;
	u32 residency = 0;

	cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
	cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4);

	render_count = I915_READ(VLV_RENDER_C0_COUNT_REG);
	media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG);

	if (rps_ei->cz_clock == 0) {
		rps_ei->cz_clock = cz_ts;
		rps_ei->render_c0 = render_count;
		rps_ei->media_c0 = media_count;

		return dev_priv->rps.cur_freq;
	}

	elapsed_time = cz_ts - rps_ei->cz_clock;
	rps_ei->cz_clock = cz_ts;

	elapsed_render = render_count - rps_ei->render_c0;
	rps_ei->render_c0 = render_count;

	elapsed_media = media_count - rps_ei->media_c0;
	rps_ei->media_c0 = media_count;

	/* Convert all the counters to a common unit of milliseconds */
	elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC;
	elapsed_render /= cz_freq_khz;
	elapsed_media /= cz_freq_khz;

	/*
	 * Calculate overall C0 residency percentage
	 * only if elapsed time is non zero
	 */
	if (elapsed_time) {
		residency =
			((max(elapsed_render, elapsed_media) * 100)
				/ elapsed_time);
	}

	return residency;
}
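
/*
 * Worked example with made-up numbers: if the render well spent 8 ms and
 * the media well 5 ms in C0 over a 10 ms evaluation interval, the helper
 * above reports max(8, 5) * 100 / 10 = 80 (percent).
 */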

/**
 * vlv_calc_delay_from_C0_counters - increase/decrease frequency based on GPU
 * busyness calculated from the C0 counters of the render & media power wells
 * @dev_priv: DRM device private
 */
static int vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
{
	u32 residency_C0_up = 0, residency_C0_down = 0;
	int new_delay, adj;

	dev_priv->rps.ei_interrupt_count++;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (dev_priv->rps.up_ei.cz_clock == 0) {
		vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei);
		vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei);
		return dev_priv->rps.cur_freq;
	}

	/*
	 * To down throttle, C0 residency should be less than down threshold
	 * for continuous EI intervals. So calculate down EI counters
	 * once in VLV_INT_COUNT_FOR_DOWN_EI
	 */
	if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) {

		dev_priv->rps.ei_interrupt_count = 0;

		residency_C0_down = vlv_c0_residency(dev_priv,
						     &dev_priv->rps.down_ei);
	} else {
		residency_C0_up = vlv_c0_residency(dev_priv,
						   &dev_priv->rps.up_ei);
	}

	new_delay = dev_priv->rps.cur_freq;

	adj = dev_priv->rps.last_adj;
	/* C0 residency is greater than UP threshold. Increase Frequency */
	if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else
			adj = 1;

		if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)
			new_delay = dev_priv->rps.cur_freq + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;

	} else if (!dev_priv->rps.ei_interrupt_count &&
		   (residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) {
		if (adj < 0)
			adj *= 2;
		else
			adj = -1;
		/*
		 * This means, C0 residency is less than down threshold over
		 * a period of VLV_INT_COUNT_FOR_DOWN_EI. So, reduce the freq
		 */
		if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
			new_delay = dev_priv->rps.cur_freq + adj;
	}

	return new_delay;
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	u32 pm_iir;
	int new_delay, adj;

	spin_lock_irq(&dev_priv->irq_lock);
	/* Speed up work cancellation during disabling rps interrupts. */
	if (!dev_priv->rps.interrupts_enabled) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

	if ((pm_iir & dev_priv->pm_rps_events) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	adj = dev_priv->rps.last_adj;
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else {
			/* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1;
		}
		new_delay = dev_priv->rps.cur_freq + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
		new_delay = vlv_calc_delay_from_C0_counters(dev_priv);
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else {
			/* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv->dev) ? -2 : -1;
		}
		new_delay = dev_priv->rps.cur_freq + adj;
	} else { /* unknown event */
		new_delay = dev_priv->rps.cur_freq;
	}

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay = clamp_t(int, new_delay,
			    dev_priv->rps.min_freq_softlimit,
			    dev_priv->rps.max_freq_softlimit);

	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;

	if (IS_VALLEYVIEW(dev_priv->dev))
		valleyview_set_rps(dev_priv->dev, new_delay);
	else
		gen6_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}

/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		u32 reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1 + (slice * 0x200);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irq(&dev_priv->irq_lock);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	spin_unlock_irq(&dev_priv->irq_lock);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_L3_DPF(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{

	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(dev))
		ivybridge_parity_error_irq_handler(dev, gt_iir);
}

static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
				       struct drm_i915_private *dev_priv,
				       u32 master_ctl)
{
	struct intel_engine_cs *ring;
	u32 rcs, bcs, vcs;
	uint32_t tmp = 0;
	irqreturn_t ret = IRQ_NONE;

	if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(0));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(0), tmp);
			ret = IRQ_HANDLED;

			rcs = tmp >> GEN8_RCS_IRQ_SHIFT;
			ring = &dev_priv->ring[RCS];
			if (rcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (rcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_lrc_irq_handler(ring);

			bcs = tmp >> GEN8_BCS_IRQ_SHIFT;
			ring = &dev_priv->ring[BCS];
			if (bcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (bcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_lrc_irq_handler(ring);
		} else
			DRM_ERROR("The master control interrupt lied (GT0)!\n");
	}

	if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) {
		tmp = I915_READ(GEN8_GT_IIR(1));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(1), tmp);
			ret = IRQ_HANDLED;

			vcs = tmp >> GEN8_VCS1_IRQ_SHIFT;
			ring = &dev_priv->ring[VCS];
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_lrc_irq_handler(ring);

			vcs = tmp >> GEN8_VCS2_IRQ_SHIFT;
			ring = &dev_priv->ring[VCS2];
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_lrc_irq_handler(ring);
		} else
			DRM_ERROR("The master control interrupt lied (GT1)!\n");
	}

	if (master_ctl & GEN8_GT_PM_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(2));
		if (tmp & dev_priv->pm_rps_events) {
			I915_WRITE(GEN8_GT_IIR(2),
				   tmp & dev_priv->pm_rps_events);
			ret = IRQ_HANDLED;
			gen6_rps_irq_handler(dev_priv, tmp);
		} else
			DRM_ERROR("The master control interrupt lied (PM)!\n");
	}

	if (master_ctl & GEN8_GT_VECS_IRQ) {
		tmp = I915_READ(GEN8_GT_IIR(3));
		if (tmp) {
			I915_WRITE(GEN8_GT_IIR(3), tmp);
			ret = IRQ_HANDLED;

			vcs = tmp >> GEN8_VECS_IRQ_SHIFT;
			ring = &dev_priv->ring[VECS];
			if (vcs & GT_RENDER_USER_INTERRUPT)
				notify_ring(dev, ring);
			if (vcs & GT_CONTEXT_SWITCH_INTERRUPT)
				intel_lrc_irq_handler(ring);
		} else
			DRM_ERROR("The master control interrupt lied (GT3)!\n");
	}

	return ret;
}

#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5
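
/*
 * Summary of the storm logic below: if more than HPD_STORM_THRESHOLD
 * interrupts arrive on one HPD pin within HPD_STORM_DETECT_PERIOD ms, the
 * pin is marked HPD_MARK_DISABLED and i915_hotplug_work_func() switches the
 * affected connector from interrupt driven detection to polling.
 */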

static int pch_port_to_hotplug_shift(enum port port)
{
	switch (port) {
	case PORT_A:
	case PORT_E:
	default:
		return -1;
	case PORT_B:
		return 0;
	case PORT_C:
		return 8;
	case PORT_D:
		return 16;
	}
}

static int i915_port_to_hotplug_shift(enum port port)
{
	switch (port) {
	case PORT_A:
	case PORT_E:
	default:
		return -1;
	case PORT_B:
		return 17;
	case PORT_C:
		return 19;
	case PORT_D:
		return 21;
	}
}
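
/*
 * The shifts above line a port's long/short pulse status bits up with the
 * PORTB_HOTPLUG_LONG_DETECT mask, so that single mask can be reused for
 * ports B/C/D; see the long_hpd computation in intel_hpd_irq_handler()
 * below.
 */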

static inline enum port get_port_from_pin(enum hpd_pin pin)
{
	switch (pin) {
	case HPD_PORT_B:
		return PORT_B;
	case HPD_PORT_C:
		return PORT_C;
	case HPD_PORT_D:
		return PORT_D;
	default:
		return PORT_A; /* no hpd */
	}
}

static inline void intel_hpd_irq_handler(struct drm_device *dev,
					 u32 hotplug_trigger,
					 u32 dig_hotplug_reg,
					 const u32 hpd[HPD_NUM_PINS])
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;
	enum port port;
	bool storm_detected = false;
	bool queue_dig = false, queue_hp = false;
	u32 dig_shift;
	u32 dig_port_mask = 0;

	if (!hotplug_trigger)
		return;

	DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n",
			 hotplug_trigger, dig_hotplug_reg);

	spin_lock(&dev_priv->irq_lock);
	for (i = 1; i < HPD_NUM_PINS; i++) {
		if (!(hpd[i] & hotplug_trigger))
			continue;

		port = get_port_from_pin(i);
		if (port && dev_priv->hpd_irq_port[port]) {
			bool long_hpd;

			if (HAS_PCH_SPLIT(dev)) {
				dig_shift = pch_port_to_hotplug_shift(port);
				long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
			} else {
				dig_shift = i915_port_to_hotplug_shift(port);
				long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
			}

			DRM_DEBUG_DRIVER("digital hpd port %c - %s\n",
					 port_name(port),
					 long_hpd ? "long" : "short");
			/*
			 * For long HPD pulses we want the digital queue to
			 * run, but we still want HPD storm detection to
			 * function.
			 */
			if (long_hpd) {
				dev_priv->long_hpd_port_mask |= (1 << port);
				dig_port_mask |= hpd[i];
			} else {
				/* for short HPD just trigger the digital queue */
				dev_priv->short_hpd_port_mask |= (1 << port);
				hotplug_trigger &= ~hpd[i];
			}
			queue_dig = true;
		}
	}

	for (i = 1; i < HPD_NUM_PINS; i++) {
		if (hpd[i] & hotplug_trigger &&
		    dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
			/*
			 * On GMCH platforms the interrupt mask bits only
			 * prevent irq generation, not the setting of the
			 * hotplug bits themselves. So only WARN about
			 * unexpected interrupts on saner platforms.
			 */
			WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
				  "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
				  hotplug_trigger, i, hpd[i]);

			continue;
		}

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		if (!(dig_port_mask & hpd[i])) {
			dev_priv->hpd_event_bits |= (1 << i);
			queue_hp = true;
		}

		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			storm_detected = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
				      dev_priv->hpd_stats[i].hpd_cnt);
		}
	}

	if (storm_detected)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock(&dev_priv->irq_lock);

	/*
	 * Our hotplug handler can grab modeset locks (by calling down into the
	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
	 * queue for otherwise the flush_work in the pageflip code will
	 * deadlock.
	 */
	if (queue_dig)
		queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work);
	if (queue_hp)
		schedule_work(&dev_priv->hotplug_work);
}

static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

#if defined(CONFIG_DEBUG_FS)
static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
					 uint32_t crc0, uint32_t crc1,
					 uint32_t crc2, uint32_t crc3,
					 uint32_t crc4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
	struct intel_pipe_crc_entry *entry;
	int head, tail;

	spin_lock(&pipe_crc->lock);

	if (!pipe_crc->entries) {
		spin_unlock(&pipe_crc->lock);
		DRM_DEBUG_KMS("spurious interrupt\n");
		return;
	}

	head = pipe_crc->head;
	tail = pipe_crc->tail;

	if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) {
		spin_unlock(&pipe_crc->lock);
		DRM_ERROR("CRC buffer overflowing\n");
		return;
	}

	entry = &pipe_crc->entries[head];

	entry->frame = dev->driver->get_vblank_counter(dev, pipe);
	entry->crc[0] = crc0;
	entry->crc[1] = crc1;
	entry->crc[2] = crc2;
	entry->crc[3] = crc3;
	entry->crc[4] = crc4;

	head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1);
	pipe_crc->head = head;

	spin_unlock(&pipe_crc->lock);

	wake_up_interruptible(&pipe_crc->wq);
}
#else
static inline void
display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
			     uint32_t crc0, uint32_t crc1,
			     uint32_t crc2, uint32_t crc3,
			     uint32_t crc4) {}
#endif
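
/*
 * The entries queued above are drained from process context by the debugfs
 * CRC interface (the pipe CRC read code in i915_debugfs.c at this point in
 * the driver's history), which sleeps on pipe_crc->wq.
 */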

static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     0, 0, 0, 0);
}

static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_1_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_2_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_3_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_4_IVB(pipe)),
				     I915_READ(PIPE_CRC_RES_5_IVB(pipe)));
}

static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t res1, res2;

	if (INTEL_INFO(dev)->gen >= 3)
		res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe));
	else
		res1 = 0;

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe));
	else
		res2 = 0;

	display_pipe_crc_irq_handler(dev, pipe,
				     I915_READ(PIPE_CRC_RES_RED(pipe)),
				     I915_READ(PIPE_CRC_RES_GREEN(pipe)),
				     I915_READ(PIPE_CRC_RES_BLUE(pipe)),
				     res1, res2);
}

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	/* TODO: RPS on GEN9+ is not supported yet. */
	if (WARN_ONCE(INTEL_INFO(dev_priv)->gen >= 9,
		      "GEN9+: unexpected RPS IRQ\n"))
		return;

	if (pm_iir & dev_priv->pm_rps_events) {
		spin_lock(&dev_priv->irq_lock);
		gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events);
		if (dev_priv->rps.interrupts_enabled) {
			dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events;
			queue_work(dev_priv->wq, &dev_priv->rps.work);
		}
		spin_unlock(&dev_priv->irq_lock);
	}

	if (INTEL_INFO(dev_priv)->gen >= 8)
		return;

	if (HAS_VEBOX(dev_priv->dev)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
	}
}

static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe)
{
	return drm_handle_vblank(dev, pipe);
}

static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pipe_stats[I915_MAX_PIPES] = { };
	int pipe;

	spin_lock(&dev_priv->irq_lock);
	for_each_pipe(dev_priv, pipe) {
		int reg;
		u32 mask, iir_bit = 0;

		/*
		 * PIPESTAT bits get signalled even when the interrupt is
		 * disabled with the mask bits, and some of the status bits do
		 * not generate interrupts at all (like the underrun bit). Hence
		 * we need to be careful that we only handle what we want to
		 * handle.
		 */

1805 /* fifo underruns are filtered in the underrun handler. */
1806 mask = PIPE_FIFO_UNDERRUN_STATUS;
1807
1808 switch (pipe) {
1809 case PIPE_A:
1810 iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT;
1811 break;
1812 case PIPE_B:
1813 iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
1814 break;
1815 case PIPE_C:
1816 iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
1817 break;
1818 }
1819 if (iir & iir_bit)
1820 mask |= dev_priv->pipestat_irq_mask[pipe];
1821
1822 if (!mask)
1823 continue;
1824
1825 reg = PIPESTAT(pipe);
1826 mask |= PIPESTAT_INT_ENABLE_MASK;
1827 pipe_stats[pipe] = I915_READ(reg) & mask;
1828
1829 /*
1830 * Clear the PIPE*STAT regs before the IIR
1831 */
1832 if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS |
1833 PIPESTAT_INT_STATUS_MASK))
1834 I915_WRITE(reg, pipe_stats[pipe]);
1835 }
1836 spin_unlock(&dev_priv->irq_lock);
1837
1838 for_each_pipe(dev_priv, pipe) {
1839 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
1840 intel_pipe_handle_vblank(dev, pipe))
1841 intel_check_page_flip(dev, pipe);
1842
1843 if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) {
1844 intel_prepare_page_flip(dev, pipe);
1845 intel_finish_page_flip(dev, pipe);
1846 }
1847
1848 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
1849 i9xx_pipe_crc_irq_handler(dev, pipe);
1850
1851 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
1852 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
1853 }
1854
1855 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
1856 gmbus_irq_handler(dev);
1857 }
1858
1859 static void i9xx_hpd_irq_handler(struct drm_device *dev)
1860 {
1861 struct drm_i915_private *dev_priv = dev->dev_private;
1862 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
1863
1864 if (hotplug_status) {
1865 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
1866 /*
1867 * Make sure hotplug status is cleared before we clear IIR, or else we
1868 * may miss hotplug events.
1869 */
1870 POSTING_READ(PORT_HOTPLUG_STAT);
1871
1872 if (IS_G4X(dev)) {
1873 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X;
1874
1875 intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x);
1876 } else {
1877 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
1878
1879 intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915);
1880 }
1881
1882 if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) &&
1883 hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
1884 dp_aux_irq_handler(dev);
1885 }
1886 }
1887
1888 static irqreturn_t valleyview_irq_handler(int irq, void *arg)
1889 {
1890 struct drm_device *dev = arg;
1891 struct drm_i915_private *dev_priv = dev->dev_private;
1892 u32 iir, gt_iir, pm_iir;
1893 irqreturn_t ret = IRQ_NONE;
1894
1895 if (!intel_irqs_enabled(dev_priv))
1896 return IRQ_NONE;
1897
1898 while (true) {
1899 /* Find, clear, then process each source of interrupt */
1900
1901 gt_iir = I915_READ(GTIIR);
1902 if (gt_iir)
1903 I915_WRITE(GTIIR, gt_iir);
1904
1905 pm_iir = I915_READ(GEN6_PMIIR);
1906 if (pm_iir)
1907 I915_WRITE(GEN6_PMIIR, pm_iir);
1908
1909 iir = I915_READ(VLV_IIR);
1910 if (iir) {
1911 /* Consume port before clearing IIR or we'll miss events */
1912 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1913 i9xx_hpd_irq_handler(dev);
1914 I915_WRITE(VLV_IIR, iir);
1915 }
1916
1917 if (gt_iir == 0 && pm_iir == 0 && iir == 0)
1918 goto out;
1919
1920 ret = IRQ_HANDLED;
1921
1922 if (gt_iir)
1923 snb_gt_irq_handler(dev, dev_priv, gt_iir);
1924 if (pm_iir)
1925 gen6_rps_irq_handler(dev_priv, pm_iir);
1926 /* Call regardless, as some status bits might not be
1927 * signalled in iir */
1928 valleyview_pipestat_irq_handler(dev, iir);
1929 }
1930
1931 out:
1932 return ret;
1933 }
1934
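/* CHV pairs the gen8 master/GT interrupt scheme with VLV-style display IIR handling. */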
1935 static irqreturn_t cherryview_irq_handler(int irq, void *arg)
1936 {
1937 struct drm_device *dev = arg;
1938 struct drm_i915_private *dev_priv = dev->dev_private;
1939 u32 master_ctl, iir;
1940 irqreturn_t ret = IRQ_NONE;
1941
1942 if (!intel_irqs_enabled(dev_priv))
1943 return IRQ_NONE;
1944
1945 for (;;) {
1946 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
1947 iir = I915_READ(VLV_IIR);
1948
1949 if (master_ctl == 0 && iir == 0)
1950 break;
1951
1952 ret = IRQ_HANDLED;
1953
1954 I915_WRITE(GEN8_MASTER_IRQ, 0);
1955
1956 /* Find, clear, then process each source of interrupt */
1957
1958 if (iir) {
1959 /* Consume port before clearing IIR or we'll miss events */
1960 if (iir & I915_DISPLAY_PORT_INTERRUPT)
1961 i9xx_hpd_irq_handler(dev);
1962 I915_WRITE(VLV_IIR, iir);
1963 }
1964
1965 gen8_gt_irq_handler(dev, dev_priv, master_ctl);
1966
1967 /* Call regardless, as some status bits might not be
1968 * signalled in iir */
1969 valleyview_pipestat_irq_handler(dev, iir);
1970
1971 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
1972 POSTING_READ(GEN8_MASTER_IRQ);
1973 }
1974
1975 return ret;
1976 }
1977
1978 static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
1979 {
1980 struct drm_i915_private *dev_priv = dev->dev_private;
1981 int pipe;
1982 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
1983 u32 dig_hotplug_reg;
1984
1985 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
1986 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
1987
1988 intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx);
1989
1990 if (pch_iir & SDE_AUDIO_POWER_MASK) {
1991 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
1992 SDE_AUDIO_POWER_SHIFT);
1993 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
1994 port_name(port));
1995 }
1996
1997 if (pch_iir & SDE_AUX_MASK)
1998 dp_aux_irq_handler(dev);
1999
2000 if (pch_iir & SDE_GMBUS)
2001 gmbus_irq_handler(dev);
2002
2003 if (pch_iir & SDE_AUDIO_HDCP_MASK)
2004 DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
2005
2006 if (pch_iir & SDE_AUDIO_TRANS_MASK)
2007 DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
2008
2009 if (pch_iir & SDE_POISON)
2010 DRM_ERROR("PCH poison interrupt\n");
2011
2012 if (pch_iir & SDE_FDI_MASK)
2013 for_each_pipe(dev_priv, pipe)
2014 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
2015 pipe_name(pipe),
2016 I915_READ(FDI_RX_IIR(pipe)));
2017
2018 if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
2019 DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
2020
2021 if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
2022 DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
2023
2024 if (pch_iir & SDE_TRANSA_FIFO_UNDER)
2025 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
2026
2027 if (pch_iir & SDE_TRANSB_FIFO_UNDER)
2028 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
2029 }
2030
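/* GEN7_ERR_INT collects poison, pipe CRC done and CPU FIFO underrun signals on IVB/HSW. */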
2031 static void ivb_err_int_handler(struct drm_device *dev)
2032 {
2033 struct drm_i915_private *dev_priv = dev->dev_private;
2034 u32 err_int = I915_READ(GEN7_ERR_INT);
2035 enum pipe pipe;
2036
2037 if (err_int & ERR_INT_POISON)
2038 DRM_ERROR("Poison interrupt\n");
2039
2040 for_each_pipe(dev_priv, pipe) {
2041 if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
2042 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2043
2044 if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) {
2045 if (IS_IVYBRIDGE(dev))
2046 ivb_pipe_crc_irq_handler(dev, pipe);
2047 else
2048 hsw_pipe_crc_irq_handler(dev, pipe);
2049 }
2050 }
2051
2052 I915_WRITE(GEN7_ERR_INT, err_int);
2053 }
2054
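/* SERR_INT reports south (PCH) poison and per-transcoder FIFO underruns. */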
2055 static void cpt_serr_int_handler(struct drm_device *dev)
2056 {
2057 struct drm_i915_private *dev_priv = dev->dev_private;
2058 u32 serr_int = I915_READ(SERR_INT);
2059
2060 if (serr_int & SERR_INT_POISON)
2061 DRM_ERROR("PCH poison interrupt\n");
2062
2063 if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
2064 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A);
2065
2066 if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
2067 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B);
2068
2069 if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
2070 intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C);
2071
2072 I915_WRITE(SERR_INT, serr_int);
2073 }
2074
2075 static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
2076 {
2077 struct drm_i915_private *dev_priv = dev->dev_private;
2078 int pipe;
2079 u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
2080 u32 dig_hotplug_reg;
2081
2082 dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG);
2083 I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg);
2084
2085 intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt);
2086
2087 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
2088 int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
2089 SDE_AUDIO_POWER_SHIFT_CPT);
2090 DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
2091 port_name(port));
2092 }
2093
2094 if (pch_iir & SDE_AUX_MASK_CPT)
2095 dp_aux_irq_handler(dev);
2096
2097 if (pch_iir & SDE_GMBUS_CPT)
2098 gmbus_irq_handler(dev);
2099
2100 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
2101 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
2102
2103 if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
2104 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
2105
2106 if (pch_iir & SDE_FDI_MASK_CPT)
2107 for_each_pipe(dev_priv, pipe)
2108 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
2109 pipe_name(pipe),
2110 I915_READ(FDI_RX_IIR(pipe)));
2111
2112 if (pch_iir & SDE_ERROR_CPT)
2113 cpt_serr_int_handler(dev);
2114 }
2115
2116 static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
2117 {
2118 struct drm_i915_private *dev_priv = dev->dev_private;
2119 enum pipe pipe;
2120
2121 if (de_iir & DE_AUX_CHANNEL_A)
2122 dp_aux_irq_handler(dev);
2123
2124 if (de_iir & DE_GSE)
2125 intel_opregion_asle_intr(dev);
2126
2127 if (de_iir & DE_POISON)
2128 DRM_ERROR("Poison interrupt\n");
2129
2130 for_each_pipe(dev_priv, pipe) {
2131 if (de_iir & DE_PIPE_VBLANK(pipe) &&
2132 intel_pipe_handle_vblank(dev, pipe))
2133 intel_check_page_flip(dev, pipe);
2134
2135 if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
2136 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
2137
2138 if (de_iir & DE_PIPE_CRC_DONE(pipe))
2139 i9xx_pipe_crc_irq_handler(dev, pipe);
2140
2141 /* plane/pipes map 1:1 on ilk+ */
2142 if (de_iir & DE_PLANE_FLIP_DONE(pipe)) {
2143 intel_prepare_page_flip(dev, pipe);
2144 intel_finish_page_flip_plane(dev, pipe);
2145 }
2146 }
2147
2148 /* check event from PCH */
2149 if (de_iir & DE_PCH_EVENT) {
2150 u32 pch_iir = I915_READ(SDEIIR);
2151
2152 if (HAS_PCH_CPT(dev))
2153 cpt_irq_handler(dev, pch_iir);
2154 else
2155 ibx_irq_handler(dev, pch_iir);
2156
2157 /* clear the PCH hotplug event before clearing the CPU irq */
2158 I915_WRITE(SDEIIR, pch_iir);
2159 }
2160
2161 if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
2162 ironlake_rps_change_irq_handler(dev);
2163 }
2164
2165 static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir)
2166 {
2167 struct drm_i915_private *dev_priv = dev->dev_private;
2168 enum pipe pipe;
2169
2170 if (de_iir & DE_ERR_INT_IVB)
2171 ivb_err_int_handler(dev);
2172
2173 if (de_iir & DE_AUX_CHANNEL_A_IVB)
2174 dp_aux_irq_handler(dev);
2175
2176 if (de_iir & DE_GSE_IVB)
2177 intel_opregion_asle_intr(dev);
2178
2179 for_each_pipe(dev_priv, pipe) {
2180 if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) &&
2181 intel_pipe_handle_vblank(dev, pipe))
2182 intel_check_page_flip(dev, pipe);
2183
2184 /* plane/pipes map 1:1 on ilk+ */
2185 if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) {
2186 intel_prepare_page_flip(dev, pipe);
2187 intel_finish_page_flip_plane(dev, pipe);
2188 }
2189 }
2190
2191 /* check event from PCH */
2192 if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
2193 u32 pch_iir = I915_READ(SDEIIR);
2194
2195 cpt_irq_handler(dev, pch_iir);
2196
2197 /* clear the PCH hotplug event before clearing the CPU irq */
2198 I915_WRITE(SDEIIR, pch_iir);
2199 }
2200 }
2201
2202 /*
2203 * To handle irqs with the minimum potential races with fresh interrupts, we:
2204 * 1 - Disable Master Interrupt Control.
2205 * 2 - Find the source(s) of the interrupt.
2206 * 3 - Clear the Interrupt Identity bits (IIR).
2207 * 4 - Process the interrupt(s) that had bits set in the IIRs.
2208 * 5 - Re-enable Master Interrupt Control.
2209 */
2210 static irqreturn_t ironlake_irq_handler(int irq, void *arg)
2211 {
2212 struct drm_device *dev = arg;
2213 struct drm_i915_private *dev_priv = dev->dev_private;
2214 u32 de_iir, gt_iir, de_ier, sde_ier = 0;
2215 irqreturn_t ret = IRQ_NONE;
2216
2217 if (!intel_irqs_enabled(dev_priv))
2218 return IRQ_NONE;
2219
2220 /* We get interrupts on unclaimed registers, so check for this before we
2221 * do any I915_{READ,WRITE}. */
2222 intel_uncore_check_errors(dev);
2223
2224 /* disable master interrupt before clearing iir */
2225 de_ier = I915_READ(DEIER);
2226 I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
2227 POSTING_READ(DEIER);
2228
2229 /* Disable south interrupts. We'll only write to SDEIIR once, so further
2230 * interrupts will be stored on its back queue, and then we'll be
2231 * able to process them after we restore SDEIER (as soon as we restore
2232 * it, we'll get an interrupt if SDEIIR still has something to process
2233 * due to its back queue). */
2234 if (!HAS_PCH_NOP(dev)) {
2235 sde_ier = I915_READ(SDEIER);
2236 I915_WRITE(SDEIER, 0);
2237 POSTING_READ(SDEIER);
2238 }
2239
2240 /* Find, clear, then process each source of interrupt */
2241
2242 gt_iir = I915_READ(GTIIR);
2243 if (gt_iir) {
2244 I915_WRITE(GTIIR, gt_iir);
2245 ret = IRQ_HANDLED;
2246 if (INTEL_INFO(dev)->gen >= 6)
2247 snb_gt_irq_handler(dev, dev_priv, gt_iir);
2248 else
2249 ilk_gt_irq_handler(dev, dev_priv, gt_iir);
2250 }
2251
2252 de_iir = I915_READ(DEIIR);
2253 if (de_iir) {
2254 I915_WRITE(DEIIR, de_iir);
2255 ret = IRQ_HANDLED;
2256 if (INTEL_INFO(dev)->gen >= 7)
2257 ivb_display_irq_handler(dev, de_iir);
2258 else
2259 ilk_display_irq_handler(dev, de_iir);
2260 }
2261
2262 if (INTEL_INFO(dev)->gen >= 6) {
2263 u32 pm_iir = I915_READ(GEN6_PMIIR);
2264 if (pm_iir) {
2265 I915_WRITE(GEN6_PMIIR, pm_iir);
2266 ret = IRQ_HANDLED;
2267 gen6_rps_irq_handler(dev_priv, pm_iir);
2268 }
2269 }
2270
2271 I915_WRITE(DEIER, de_ier);
2272 POSTING_READ(DEIER);
2273 if (!HAS_PCH_NOP(dev)) {
2274 I915_WRITE(SDEIER, sde_ier);
2275 POSTING_READ(SDEIER);
2276 }
2277
2278 return ret;
2279 }
2280
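/* Gen8+ splits the IIRs per interrupt source; the master control register tells us which banks need servicing. */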
2281 static irqreturn_t gen8_irq_handler(int irq, void *arg)
2282 {
2283 struct drm_device *dev = arg;
2284 struct drm_i915_private *dev_priv = dev->dev_private;
2285 u32 master_ctl;
2286 irqreturn_t ret = IRQ_NONE;
2287 uint32_t tmp = 0;
2288 enum pipe pipe;
2289 u32 aux_mask = GEN8_AUX_CHANNEL_A;
2290
2291 if (!intel_irqs_enabled(dev_priv))
2292 return IRQ_NONE;
2293
2294 if (IS_GEN9(dev))
2295 aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
2296 GEN9_AUX_CHANNEL_D;
2297
2298 master_ctl = I915_READ(GEN8_MASTER_IRQ);
2299 master_ctl &= ~GEN8_MASTER_IRQ_CONTROL;
2300 if (!master_ctl)
2301 return IRQ_NONE;
2302
2303 I915_WRITE(GEN8_MASTER_IRQ, 0);
2304 POSTING_READ(GEN8_MASTER_IRQ);
2305
2306 /* Find, clear, then process each source of interrupt */
2307
2308 ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl);
2309
2310 if (master_ctl & GEN8_DE_MISC_IRQ) {
2311 tmp = I915_READ(GEN8_DE_MISC_IIR);
2312 if (tmp) {
2313 I915_WRITE(GEN8_DE_MISC_IIR, tmp);
2314 ret = IRQ_HANDLED;
2315 if (tmp & GEN8_DE_MISC_GSE)
2316 intel_opregion_asle_intr(dev);
2317 else
2318 DRM_ERROR("Unexpected DE Misc interrupt\n");
2319 } else {
2320 DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
2321 }
2322 }
2323
2324 if (master_ctl & GEN8_DE_PORT_IRQ) {
2325 tmp = I915_READ(GEN8_DE_PORT_IIR);
2326 if (tmp) {
2327 I915_WRITE(GEN8_DE_PORT_IIR, tmp);
2328 ret = IRQ_HANDLED;
2329
2330 if (tmp & aux_mask)
2331 dp_aux_irq_handler(dev);
2332 else
2333 DRM_ERROR("Unexpected DE Port interrupt\n");
2334 } else {
2335 DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
2336 }
2337 }
2338
2339 for_each_pipe(dev_priv, pipe) {
2340 uint32_t pipe_iir, flip_done = 0, fault_errors = 0;
2341
2342 if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe)))
2343 continue;
2344
2345 pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
2346 if (pipe_iir) {
2347 ret = IRQ_HANDLED;
2348 I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir);
2349
2350 if (pipe_iir & GEN8_PIPE_VBLANK &&
2351 intel_pipe_handle_vblank(dev, pipe))
2352 intel_check_page_flip(dev, pipe);
2353
2354 if (IS_GEN9(dev))
2355 flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE;
2356 else
2357 flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE;
2358
2359 if (flip_done) {
2360 intel_prepare_page_flip(dev, pipe);
2361 intel_finish_page_flip_plane(dev, pipe);
2362 }
2363
2364 if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE)
2365 hsw_pipe_crc_irq_handler(dev, pipe);
2366
2367 if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN)
2368 intel_cpu_fifo_underrun_irq_handler(dev_priv,
2369 pipe);
2370
2372 if (IS_GEN9(dev))
2373 fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
2374 else
2375 fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
2376
2377 if (fault_errors)
2378 DRM_ERROR("Fault errors on pipe %c\n: 0x%08x",
2379 pipe_name(pipe),
2380 pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS);
2381 } else
2382 DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
2383 }
2384
2385 if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) {
2386 /*
2387 * FIXME(BDW): Assume for now that the new interrupt handling
2388 * scheme also closed the SDE interrupt handling race we've seen
2389 * on older pch-split platforms. But this needs testing.
2390 */
2391 u32 pch_iir = I915_READ(SDEIIR);
2392 if (pch_iir) {
2393 I915_WRITE(SDEIIR, pch_iir);
2394 ret = IRQ_HANDLED;
2395 cpt_irq_handler(dev, pch_iir);
2396 } else
2397 DRM_ERROR("The master control interrupt lied (SDE)!\n");
2398
2399 }
2400
2401 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
2402 POSTING_READ(GEN8_MASTER_IRQ);
2403
2404 return ret;
2405 }
2406
2407 static void i915_error_wake_up(struct drm_i915_private *dev_priv,
2408 bool reset_completed)
2409 {
2410 struct intel_engine_cs *ring;
2411 int i;
2412
2413 /*
2414 * Notify all waiters for GPU completion events that reset state has
2415 * been changed, and that they need to restart their wait after
2416 * checking for potential errors (and bail out to drop locks if there is
2417 * a gpu reset pending so that i915_reset_and_wakeup can acquire them).
2418 */
2419
2420 /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */
2421 for_each_ring(ring, dev_priv, i)
2422 wake_up_all(&ring->irq_queue);
2423
2424 /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */
2425 wake_up_all(&dev_priv->pending_flip_queue);
2426
2427 /*
2428 * Signal tasks blocked in i915_gem_wait_for_error that the pending
2429 * reset state is cleared.
2430 */
2431 if (reset_completed)
2432 wake_up_all(&dev_priv->gpu_error.reset_queue);
2433 }
2434
2435 /**
2436 * i915_reset_and_wakeup - do process context error handling work
2437 * @dev: drm device
*
2438 * Fire an error uevent so userspace can see that a hang or error
2439 * was detected.
2440 */
2441 static void i915_reset_and_wakeup(struct drm_device *dev)
2442 {
2443 struct drm_i915_private *dev_priv = to_i915(dev);
2444 struct i915_gpu_error *error = &dev_priv->gpu_error;
2445 char *error_event[] = { I915_ERROR_UEVENT "=1", NULL };
2446 char *reset_event[] = { I915_RESET_UEVENT "=1", NULL };
2447 char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL };
2448 int ret;
2449
2450 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event);
2451
2452 /*
2453 * Note that there's only one work item which does gpu resets, so we
2454 * need not worry about concurrent gpu resets potentially incrementing
2455 * error->reset_counter twice. We only need to take care of another
2456 * racing irq/hangcheck declaring the gpu dead for a second time. A
2457 * quick check for that is good enough: schedule_work ensures the
2458 * correct ordering between hang detection and this work item, and since
2459 * the reset in-progress bit is only ever set by code outside of this
2460 * work we don't need to worry about any other races.
2461 */
2462 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
2463 DRM_DEBUG_DRIVER("resetting chip\n");
2464 kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
2465 reset_event);
2466
2467 /*
2468 * In most cases it's guaranteed that we get here with an RPM
2469 * reference held, for example because there is a pending GPU
2470 * request that won't finish until the reset is done. This
2471 * isn't the case at least when we get here by doing a
2472 * simulated reset via debugfs, so get an RPM reference.
2473 */
2474 intel_runtime_pm_get(dev_priv);
2475
2476 intel_prepare_reset(dev);
2477
2478 /*
2479 * All state reset _must_ be completed before we update the
2480 * reset counter, for otherwise waiters might miss the reset
2481 * pending state and not properly drop locks, resulting in
2482 * deadlocks with the reset work.
2483 */
2484 ret = i915_reset(dev);
2485
2486 intel_finish_reset(dev);
2487
2488 intel_runtime_pm_put(dev_priv);
2489
2490 if (ret == 0) {
2491 /*
2492 * After all the gem state is reset, increment the reset
2493 * counter and wake up everyone waiting for the reset to
2494 * complete.
2495 *
2496 * Since unlock operations are a one-sided barrier only,
2497 * we need to insert a barrier here to order any seqno
2498 * updates before the counter increment.
2500 */
2501 smp_mb__before_atomic();
2502 atomic_inc(&dev_priv->gpu_error.reset_counter);
2503
2504 kobject_uevent_env(&dev->primary->kdev->kobj,
2505 KOBJ_CHANGE, reset_done_event);
2506 } else {
2507 atomic_set_mask(I915_WEDGED, &error->reset_counter);
2508 }
2509
2510 /*
2511 * Note: The wake_up also serves as a memory barrier so that
2512 * waiters see the updated value of the reset counter atomic_t.
2513 */
2514 i915_error_wake_up(dev_priv, true);
2515 }
2516 }
2517
2518 static void i915_report_and_clear_eir(struct drm_device *dev)
2519 {
2520 struct drm_i915_private *dev_priv = dev->dev_private;
2521 uint32_t instdone[I915_NUM_INSTDONE_REG];
2522 u32 eir = I915_READ(EIR);
2523 int pipe, i;
2524
2525 if (!eir)
2526 return;
2527
2528 pr_err("render error detected, EIR: 0x%08x\n", eir);
2529
2530 i915_get_extra_instdone(dev, instdone);
2531
2532 if (IS_G4X(dev)) {
2533 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2534 u32 ipeir = I915_READ(IPEIR_I965);
2535
2536 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2537 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2538 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2539 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2540 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
2541 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2542 I915_WRITE(IPEIR_I965, ipeir);
2543 POSTING_READ(IPEIR_I965);
2544 }
2545 if (eir & GM45_ERROR_PAGE_TABLE) {
2546 u32 pgtbl_err = I915_READ(PGTBL_ER);
2547 pr_err("page table error\n");
2548 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
2549 I915_WRITE(PGTBL_ER, pgtbl_err);
2550 POSTING_READ(PGTBL_ER);
2551 }
2552 }
2553
2554 if (!IS_GEN2(dev)) {
2555 if (eir & I915_ERROR_PAGE_TABLE) {
2556 u32 pgtbl_err = I915_READ(PGTBL_ER);
2557 pr_err("page table error\n");
2558 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
2559 I915_WRITE(PGTBL_ER, pgtbl_err);
2560 POSTING_READ(PGTBL_ER);
2561 }
2562 }
2563
2564 if (eir & I915_ERROR_MEMORY_REFRESH) {
2565 pr_err("memory refresh error:\n");
2566 for_each_pipe(dev_priv, pipe)
2567 pr_err("pipe %c stat: 0x%08x\n",
2568 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
2569 /* pipestat has already been acked */
2570 }
2571 if (eir & I915_ERROR_INSTRUCTION) {
2572 pr_err("instruction error\n");
2573 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
2574 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2575 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
2576 if (INTEL_INFO(dev)->gen < 4) {
2577 u32 ipeir = I915_READ(IPEIR);
2578
2579 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
2580 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
2581 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
2582 I915_WRITE(IPEIR, ipeir);
2583 POSTING_READ(IPEIR);
2584 } else {
2585 u32 ipeir = I915_READ(IPEIR_I965);
2586
2587 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2588 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
2589 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
2590 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
2591 I915_WRITE(IPEIR_I965, ipeir);
2592 POSTING_READ(IPEIR_I965);
2593 }
2594 }
2595
2596 I915_WRITE(EIR, eir);
2597 POSTING_READ(EIR);
2598 eir = I915_READ(EIR);
2599 if (eir) {
2600 /*
2601 * some errors might have become stuck,
2602 * mask them.
2603 */
2604 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2605 I915_WRITE(EMR, I915_READ(EMR) | eir);
2606 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2607 }
2608 }
2609
2610 /**
2611 * i915_handle_error - handle a gpu error
2612 * @dev: drm device
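* @wedged: set to request a full GPU reset
* @fmt: printf style format string describing the error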
2613 *
2614 * Do some basic checking of register state at error time and
2615 * dump it to the syslog. Also call i915_capture_error_state() to make
2616 * sure we get a record and make it available in debugfs. Fire a uevent
2617 * so userspace knows something bad happened (should trigger collection
2618 * of a ring dump etc.).
2619 */
2620 void i915_handle_error(struct drm_device *dev, bool wedged,
2621 const char *fmt, ...)
2622 {
2623 struct drm_i915_private *dev_priv = dev->dev_private;
2624 va_list args;
2625 char error_msg[80];
2626
2627 va_start(args, fmt);
2628 vscnprintf(error_msg, sizeof(error_msg), fmt, args);
2629 va_end(args);
2630
2631 i915_capture_error_state(dev, wedged, error_msg);
2632 i915_report_and_clear_eir(dev);
2633
2634 if (wedged) {
2635 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
2636 &dev_priv->gpu_error.reset_counter);
2637
2638 /*
2639 * Wakeup waiting processes so that the reset function
2640 * i915_reset_and_wakeup doesn't deadlock trying to grab
2641 * various locks. By bumping the reset counter first, the woken
2642 * processes will see a reset in progress and back off,
2643 * releasing their locks and then wait for the reset completion.
2644 * We must do this for _all_ gpu waiters that might hold locks
2645 * that the reset work needs to acquire.
2646 *
2647 * Note: The wake_up serves as the required memory barrier to
2648 * ensure that the waiters see the updated value of the reset
2649 * counter atomic_t.
2650 */
2651 i915_error_wake_up(dev_priv, false);
2652 }
2653
2654 i915_reset_and_wakeup(dev);
2655 }
2656
2657 /* Called from drm generic code, passed 'crtc' which
2658 * we use as a pipe index
2659 */
2660 static int i915_enable_vblank(struct drm_device *dev, int pipe)
2661 {
2662 struct drm_i915_private *dev_priv = dev->dev_private;
2663 unsigned long irqflags;
2664
2665 if (!i915_pipe_enabled(dev, pipe))
2666 return -EINVAL;
2667
2668 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2669 if (INTEL_INFO(dev)->gen >= 4)
2670 i915_enable_pipestat(dev_priv, pipe,
2671 PIPE_START_VBLANK_INTERRUPT_STATUS);
2672 else
2673 i915_enable_pipestat(dev_priv, pipe,
2674 PIPE_VBLANK_INTERRUPT_STATUS);
2675 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2676
2677 return 0;
2678 }
2679
2680 static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
2681 {
2682 struct drm_i915_private *dev_priv = dev->dev_private;
2683 unsigned long irqflags;
2684 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2685 DE_PIPE_VBLANK(pipe);
2686
2687 if (!i915_pipe_enabled(dev, pipe))
2688 return -EINVAL;
2689
2690 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2691 ironlake_enable_display_irq(dev_priv, bit);
2692 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2693
2694 return 0;
2695 }
2696
2697 static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
2698 {
2699 struct drm_i915_private *dev_priv = dev->dev_private;
2700 unsigned long irqflags;
2701
2702 if (!i915_pipe_enabled(dev, pipe))
2703 return -EINVAL;
2704
2705 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2706 i915_enable_pipestat(dev_priv, pipe,
2707 PIPE_START_VBLANK_INTERRUPT_STATUS);
2708 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2709
2710 return 0;
2711 }
2712
2713 static int gen8_enable_vblank(struct drm_device *dev, int pipe)
2714 {
2715 struct drm_i915_private *dev_priv = dev->dev_private;
2716 unsigned long irqflags;
2717
2718 if (!i915_pipe_enabled(dev, pipe))
2719 return -EINVAL;
2720
2721 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2722 dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK;
2723 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2724 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2725 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2726 return 0;
2727 }
2728
2729 /* Called from drm generic code, passed 'crtc' which
2730 * we use as a pipe index
2731 */
2732 static void i915_disable_vblank(struct drm_device *dev, int pipe)
2733 {
2734 struct drm_i915_private *dev_priv = dev->dev_private;
2735 unsigned long irqflags;
2736
2737 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2738 i915_disable_pipestat(dev_priv, pipe,
2739 PIPE_VBLANK_INTERRUPT_STATUS |
2740 PIPE_START_VBLANK_INTERRUPT_STATUS);
2741 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2742 }
2743
2744 static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
2745 {
2746 struct drm_i915_private *dev_priv = dev->dev_private;
2747 unsigned long irqflags;
2748 uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) :
2749 DE_PIPE_VBLANK(pipe);
2750
2751 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2752 ironlake_disable_display_irq(dev_priv, bit);
2753 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2754 }
2755
2756 static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
2757 {
2758 struct drm_i915_private *dev_priv = dev->dev_private;
2759 unsigned long irqflags;
2760
2761 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2762 i915_disable_pipestat(dev_priv, pipe,
2763 PIPE_START_VBLANK_INTERRUPT_STATUS);
2764 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2765 }
2766
2767 static void gen8_disable_vblank(struct drm_device *dev, int pipe)
2768 {
2769 struct drm_i915_private *dev_priv = dev->dev_private;
2770 unsigned long irqflags;
2771
2772 if (!i915_pipe_enabled(dev, pipe))
2773 return;
2774
2775 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2776 dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK;
2777 I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]);
2778 POSTING_READ(GEN8_DE_PIPE_IMR(pipe));
2779 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2780 }
2781
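/* New requests go on the tail of request_list, so .prev of the head is the most recent one. */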
2782 static struct drm_i915_gem_request *
2783 ring_last_request(struct intel_engine_cs *ring)
2784 {
2785 return list_entry(ring->request_list.prev,
2786 struct drm_i915_gem_request, list);
2787 }
2788
2789 static bool
2790 ring_idle(struct intel_engine_cs *ring)
2791 {
2792 return (list_empty(&ring->request_list) ||
2793 i915_gem_request_completed(ring_last_request(ring), false));
2794 }
2795
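/* gen8+ encodes semaphore waits as MI_SEMAPHORE_WAIT (opcode 0x1c); older gens use MI_SEMAPHORE_MBOX, compared with the per-ring sync bits masked off. */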
2796 static bool
2797 ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr)
2798 {
2799 if (INTEL_INFO(dev)->gen >= 8) {
2800 return (ipehr >> 23) == 0x1c;
2801 } else {
2802 ipehr &= ~MI_SEMAPHORE_SYNC_MASK;
2803 return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE |
2804 MI_SEMAPHORE_REGISTER);
2805 }
2806 }
2807
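/* Map a semaphore wait back to the engine expected to signal it: by GGTT offset on gen8+, by mbox sync bits on gen6/7. */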
2808 static struct intel_engine_cs *
2809 semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset)
2810 {
2811 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2812 struct intel_engine_cs *signaller;
2813 int i;
2814
2815 if (INTEL_INFO(dev_priv->dev)->gen >= 8) {
2816 for_each_ring(signaller, dev_priv, i) {
2817 if (ring == signaller)
2818 continue;
2819
2820 if (offset == signaller->semaphore.signal_ggtt[ring->id])
2821 return signaller;
2822 }
2823 } else {
2824 u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK;
2825
2826 for_each_ring(signaller, dev_priv, i) {
2827 if (ring == signaller)
2828 continue;
2829
2830 if (sync_bits == signaller->semaphore.mbox.wait[ring->id])
2831 return signaller;
2832 }
2833 }
2834
2835 DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n",
2836 ring->id, ipehr, offset);
2837
2838 return NULL;
2839 }
2840
2841 static struct intel_engine_cs *
2842 semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno)
2843 {
2844 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2845 u32 cmd, ipehr, head;
2846 u64 offset = 0;
2847 int i, backwards;
2848
2849 ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
2850 if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
2851 return NULL;
2852
2853 /*
2854 * HEAD is likely pointing to the dword after the actual command,
2855 * so scan backwards until we find the MBOX. But limit it to just 3
2856 * or 4 dwords depending on the semaphore wait command size.
2857 * Note that we don't care about ACTHD here since that might
2858 * point at a batch, and semaphores are always emitted into the
2859 * ringbuffer itself.
2860 */
2861 head = I915_READ_HEAD(ring) & HEAD_ADDR;
2862 backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4;
2863
2864 for (i = backwards; i; --i) {
2865 /*
2866 * Be paranoid and presume the hw has gone off into the wild -
2867 * our ring is smaller than what the hardware (and hence
2868 * HEAD_ADDR) allows. Also handles wrap-around.
2869 */
2870 head &= ring->buffer->size - 1;
2871
2872 /* This here seems to blow up */
2873 cmd = ioread32(ring->buffer->virtual_start + head);
2874 if (cmd == ipehr)
2875 break;
2876
2877 head -= 4;
2878 }
2879
2880 if (!i)
2881 return NULL;
2882
2883 *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1;
2884 if (INTEL_INFO(ring->dev)->gen >= 8) {
2885 offset = ioread32(ring->buffer->virtual_start + head + 12);
2886 offset <<= 32;
2887 offset |= ioread32(ring->buffer->virtual_start + head + 8);
2888 }
2889 return semaphore_wait_to_signaller_ring(ring, ipehr, offset);
2890 }
2891
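/* Returns 1 if the awaited seqno has already passed, 0 if the wait is still legitimate, -1 on a potential deadlock. */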
2892 static int semaphore_passed(struct intel_engine_cs *ring)
2893 {
2894 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2895 struct intel_engine_cs *signaller;
2896 u32 seqno;
2897
2898 ring->hangcheck.deadlock++;
2899
2900 signaller = semaphore_waits_for(ring, &seqno);
2901 if (signaller == NULL)
2902 return -1;
2903
2904 /* Prevent pathological recursion due to driver bugs */
2905 if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
2906 return -1;
2907
2908 if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
2909 return 1;
2910
2911 /* cursory check for an unkickable deadlock */
2912 if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
2913 semaphore_passed(signaller) < 0)
2914 return -1;
2915
2916 return 0;
2917 }
2918
2919 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2920 {
2921 struct intel_engine_cs *ring;
2922 int i;
2923
2924 for_each_ring(ring, dev_priv, i)
2925 ring->hangcheck.deadlock = 0;
2926 }
2927
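/* Classify why a ring stopped advancing: still active, in a legitimate wait, kickable, or genuinely hung. */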
2928 static enum intel_ring_hangcheck_action
2929 ring_stuck(struct intel_engine_cs *ring, u64 acthd)
2930 {
2931 struct drm_device *dev = ring->dev;
2932 struct drm_i915_private *dev_priv = dev->dev_private;
2933 u32 tmp;
2934
2935 if (acthd != ring->hangcheck.acthd) {
2936 if (acthd > ring->hangcheck.max_acthd) {
2937 ring->hangcheck.max_acthd = acthd;
2938 return HANGCHECK_ACTIVE;
2939 }
2940
2941 return HANGCHECK_ACTIVE_LOOP;
2942 }
2943
2944 if (IS_GEN2(dev))
2945 return HANGCHECK_HUNG;
2946
2947 /* Is the chip hanging on a WAIT_FOR_EVENT?
2948 * If so we can simply poke the RB_WAIT bit
2949 * and break the hang. This should work on
2950 * all but the second generation chipsets.
2951 */
2952 tmp = I915_READ_CTL(ring);
2953 if (tmp & RING_WAIT) {
2954 i915_handle_error(dev, false,
2955 "Kicking stuck wait on %s",
2956 ring->name);
2957 I915_WRITE_CTL(ring, tmp);
2958 return HANGCHECK_KICK;
2959 }
2960
2961 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
2962 switch (semaphore_passed(ring)) {
2963 default:
2964 return HANGCHECK_HUNG;
2965 case 1:
2966 i915_handle_error(dev, false,
2967 "Kicking stuck semaphore on %s",
2968 ring->name);
2969 I915_WRITE_CTL(ring, tmp);
2970 return HANGCHECK_KICK;
2971 case 0:
2972 return HANGCHECK_WAIT;
2973 }
2974 }
2975
2976 return HANGCHECK_HUNG;
2977 }
2978
2979 /*
2980 * This is called when the chip hasn't reported back with completed
2981 * batchbuffers in a long time. We keep track of seqno progress per
2982 * ring, and if there is no progress the hangcheck score for that ring
2983 * is increased. Further, acthd is inspected to see if the ring is
2984 * stuck; if so, we kick it. If we see no progress on three subsequent
2985 * calls we assume the chip is wedged and try to fix it by resetting it.
2986 */
2987 static void i915_hangcheck_elapsed(struct work_struct *work)
2988 {
2989 struct drm_i915_private *dev_priv =
2990 container_of(work, typeof(*dev_priv),
2991 gpu_error.hangcheck_work.work);
2992 struct drm_device *dev = dev_priv->dev;
2993 struct intel_engine_cs *ring;
2994 int i;
2995 int busy_count = 0, rings_hung = 0;
2996 bool stuck[I915_NUM_RINGS] = { 0 };
2997 #define BUSY 1 /* small bump while the ring still appears busy */
2998 #define KICK 5 /* the ring had to be kicked to make progress */
2999 #define HUNG 20 /* no forward progress was detected at all */
3000
3001 if (!i915.enable_hangcheck)
3002 return;
3003
3004 for_each_ring(ring, dev_priv, i) {
3005 u64 acthd;
3006 u32 seqno;
3007 bool busy = true;
3008
3009 semaphore_clear_deadlocks(dev_priv);
3010
3011 seqno = ring->get_seqno(ring, false);
3012 acthd = intel_ring_get_active_head(ring);
3013
3014 if (ring->hangcheck.seqno == seqno) {
3015 if (ring_idle(ring)) {
3016 ring->hangcheck.action = HANGCHECK_IDLE;
3017
3018 if (waitqueue_active(&ring->irq_queue)) {
3019 /* Issue a wake-up to catch stuck h/w. */
3020 if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) {
3021 if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring)))
3022 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
3023 ring->name);
3024 else
3025 DRM_INFO("Fake missed irq on %s\n",
3026 ring->name);
3027 wake_up_all(&ring->irq_queue);
3028 }
3029 /* Safeguard against driver failure */
3030 ring->hangcheck.score += BUSY;
3031 } else
3032 busy = false;
3033 } else {
3034 /* We always increment the hangcheck score
3035 * if the ring is busy and still processing
3036 * the same request, so that no single request
3037 * can run indefinitely (such as a chain of
3038 * batches). The only time we do not increment
3039 * the hangcheck score on this ring is if this
3040 * ring is in a legitimate wait for another
3041 * ring. In that case the waiting ring is a
3042 * victim and we want to be sure we catch the
3043 * right culprit. Then every time we do kick
3044 * the ring, add a small increment to the
3045 * score so that we can catch a batch that is
3046 * being repeatedly kicked and so responsible
3047 * for stalling the machine.
3048 */
3049 ring->hangcheck.action = ring_stuck(ring,
3050 acthd);
3051
3052 switch (ring->hangcheck.action) {
3053 case HANGCHECK_IDLE:
3054 case HANGCHECK_WAIT:
3055 case HANGCHECK_ACTIVE:
3056 break;
3057 case HANGCHECK_ACTIVE_LOOP:
3058 ring->hangcheck.score += BUSY;
3059 break;
3060 case HANGCHECK_KICK:
3061 ring->hangcheck.score += KICK;
3062 break;
3063 case HANGCHECK_HUNG:
3064 ring->hangcheck.score += HUNG;
3065 stuck[i] = true;
3066 break;
3067 }
3068 }
3069 } else {
3070 ring->hangcheck.action = HANGCHECK_ACTIVE;
3071
3072 /* Gradually reduce the count so that we catch DoS
3073 * attempts across multiple batches.
3074 */
3075 if (ring->hangcheck.score > 0)
3076 ring->hangcheck.score--;
3077
3078 ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0;
3079 }
3080
3081 ring->hangcheck.seqno = seqno;
3082 ring->hangcheck.acthd = acthd;
3083 busy_count += busy;
3084 }
3085
3086 for_each_ring(ring, dev_priv, i) {
3087 if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) {
3088 DRM_INFO("%s on %s\n",
3089 stuck[i] ? "stuck" : "no progress",
3090 ring->name);
3091 rings_hung++;
3092 }
3093 }
3094
3095 if (rings_hung) {
3096 i915_handle_error(dev, true, "Ring hung");
return;
}
3097
3098 if (busy_count)
3099 /* Reset the timer in case the chip hangs without another
3100 * request being added */
3101 i915_queue_hangcheck(dev);
3102 }
3103
3104 void i915_queue_hangcheck(struct drm_device *dev)
3105 {
3106 struct i915_gpu_error *e = &to_i915(dev)->gpu_error;
3107
3108 if (!i915.enable_hangcheck)
3109 return;
3110
3111 /* Don't continually defer the hangcheck so that it is always run at
3112 * least once after work has been scheduled on any ring. Otherwise,
3113 * we will ignore a hung ring if a second ring is kept busy.
3114 */
3115
3116 queue_delayed_work(e->hangcheck_wq, &e->hangcheck_work,
3117 round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES));
3118 }
3119
3120 static void ibx_irq_reset(struct drm_device *dev)
3121 {
3122 struct drm_i915_private *dev_priv = dev->dev_private;
3123
3124 if (HAS_PCH_NOP(dev))
3125 return;
3126
3127 GEN5_IRQ_RESET(SDE);
3128
3129 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
3130 I915_WRITE(SERR_INT, 0xffffffff);
3131 }
3132
3133 /*
3134 * SDEIER is also touched by the interrupt handler to work around missed PCH
3135 * interrupts. Hence we can't update it after the interrupt handler is enabled -
3136 * instead we unconditionally enable all PCH interrupt sources here, but then
3137 * only unmask them as needed with SDEIMR.
3138 *
3139 * This function needs to be called before interrupts are enabled.
3140 */
3141 static void ibx_irq_pre_postinstall(struct drm_device *dev)
3142 {
3143 struct drm_i915_private *dev_priv = dev->dev_private;
3144
3145 if (HAS_PCH_NOP(dev))
3146 return;
3147
3148 WARN_ON(I915_READ(SDEIER) != 0);
3149 I915_WRITE(SDEIER, 0xffffffff);
3150 POSTING_READ(SDEIER);
3151 }
3152
3153 static void gen5_gt_irq_reset(struct drm_device *dev)
3154 {
3155 struct drm_i915_private *dev_priv = dev->dev_private;
3156
3157 GEN5_IRQ_RESET(GT);
3158 if (INTEL_INFO(dev)->gen >= 6)
3159 GEN5_IRQ_RESET(GEN6_PM);
3160 }
3161
3162 /* drm_dma.h hooks */
3164 static void ironlake_irq_reset(struct drm_device *dev)
3165 {
3166 struct drm_i915_private *dev_priv = dev->dev_private;
3167
3168 I915_WRITE(HWSTAM, 0xffffffff);
3169
3170 GEN5_IRQ_RESET(DE);
3171 if (IS_GEN7(dev))
3172 I915_WRITE(GEN7_ERR_INT, 0xffffffff);
3173
3174 gen5_gt_irq_reset(dev);
3175
3176 ibx_irq_reset(dev);
3177 }
3178
3179 static void vlv_display_irq_reset(struct drm_i915_private *dev_priv)
3180 {
3181 enum pipe pipe;
3182
3183 I915_WRITE(PORT_HOTPLUG_EN, 0);
3184 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3185
3186 for_each_pipe(dev_priv, pipe)
3187 I915_WRITE(PIPESTAT(pipe), 0xffff);
3188
3189 GEN5_IRQ_RESET(VLV_);
3190 }
3191
3192 static void valleyview_irq_preinstall(struct drm_device *dev)
3193 {
3194 struct drm_i915_private *dev_priv = dev->dev_private;
3195
3196 /* VLV magic */
3197 I915_WRITE(VLV_IMR, 0);
3198 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
3199 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
3200 I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
3201
3202 gen5_gt_irq_reset(dev);
3203
3204 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3205
3206 vlv_display_irq_reset(dev_priv);
3207 }
3208
3209 static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv)
3210 {
3211 GEN8_IRQ_RESET_NDX(GT, 0);
3212 GEN8_IRQ_RESET_NDX(GT, 1);
3213 GEN8_IRQ_RESET_NDX(GT, 2);
3214 GEN8_IRQ_RESET_NDX(GT, 3);
3215 }
3216
3217 static void gen8_irq_reset(struct drm_device *dev)
3218 {
3219 struct drm_i915_private *dev_priv = dev->dev_private;
3220 int pipe;
3221
3222 I915_WRITE(GEN8_MASTER_IRQ, 0);
3223 POSTING_READ(GEN8_MASTER_IRQ);
3224
3225 gen8_gt_irq_reset(dev_priv);
3226
3227 for_each_pipe(dev_priv, pipe)
3228 if (intel_display_power_is_enabled(dev_priv,
3229 POWER_DOMAIN_PIPE(pipe)))
3230 GEN8_IRQ_RESET_NDX(DE_PIPE, pipe);
3231
3232 GEN5_IRQ_RESET(GEN8_DE_PORT_);
3233 GEN5_IRQ_RESET(GEN8_DE_MISC_);
3234 GEN5_IRQ_RESET(GEN8_PCU_);
3235
3236 ibx_irq_reset(dev);
3237 }
3238
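/* The pipe B/C interrupt registers sit in the display power well and lose their contents while it is down, so reprogram them once the well is back up. */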
3239 void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
3240 {
3241 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
3242
3243 spin_lock_irq(&dev_priv->irq_lock);
3244 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
3245 ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
3246 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
3247 ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
3248 spin_unlock_irq(&dev_priv->irq_lock);
3249 }
3250
3251 static void cherryview_irq_preinstall(struct drm_device *dev)
3252 {
3253 struct drm_i915_private *dev_priv = dev->dev_private;
3254
3255 I915_WRITE(GEN8_MASTER_IRQ, 0);
3256 POSTING_READ(GEN8_MASTER_IRQ);
3257
3258 gen8_gt_irq_reset(dev_priv);
3259
3260 GEN5_IRQ_RESET(GEN8_PCU_);
3261
3262 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV);
3263
3264 vlv_display_irq_reset(dev_priv);
3265 }
3266
3267 static void ibx_hpd_irq_setup(struct drm_device *dev)
3268 {
3269 struct drm_i915_private *dev_priv = dev->dev_private;
3270 struct intel_encoder *intel_encoder;
3271 u32 hotplug_irqs, hotplug, enabled_irqs = 0;
3272
3273 if (HAS_PCH_IBX(dev)) {
3274 hotplug_irqs = SDE_HOTPLUG_MASK;
3275 for_each_intel_encoder(dev, intel_encoder)
3276 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3277 enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin];
3278 } else {
3279 hotplug_irqs = SDE_HOTPLUG_MASK_CPT;
3280 for_each_intel_encoder(dev, intel_encoder)
3281 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3282 enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin];
3283 }
3284
3285 ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs);
3286
3287 /*
3288 * Enable digital hotplug on the PCH, and configure the DP short pulse
3289 * duration to 2ms (which is the minimum in the Display Port spec)
3290 *
3291 * This register is the same on all known PCH chips.
3292 */
3293 hotplug = I915_READ(PCH_PORT_HOTPLUG);
3294 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
3295 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
3296 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
3297 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
3298 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
3299 }
3300
3301 static void ibx_irq_postinstall(struct drm_device *dev)
3302 {
3303 struct drm_i915_private *dev_priv = dev->dev_private;
3304 u32 mask;
3305
3306 if (HAS_PCH_NOP(dev))
3307 return;
3308
3309 if (HAS_PCH_IBX(dev))
3310 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
3311 else
3312 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
3313
3314 GEN5_ASSERT_IIR_IS_ZERO(SDEIIR);
3315 I915_WRITE(SDEIMR, ~mask);
3316 }
3317
3318 static void gen5_gt_irq_postinstall(struct drm_device *dev)
3319 {
3320 struct drm_i915_private *dev_priv = dev->dev_private;
3321 u32 pm_irqs, gt_irqs;
3322
3323 pm_irqs = gt_irqs = 0;
3324
3325 dev_priv->gt_irq_mask = ~0;
3326 if (HAS_L3_DPF(dev)) {
3327 /* L3 parity interrupt is always unmasked. */
3328 dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev);
3329 gt_irqs |= GT_PARITY_ERROR(dev);
3330 }
3331
3332 gt_irqs |= GT_RENDER_USER_INTERRUPT;
3333 if (IS_GEN5(dev)) {
3334 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
3335 ILK_BSD_USER_INTERRUPT;
3336 } else {
3337 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
3338 }
3339
3340 GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
3341
3342 if (INTEL_INFO(dev)->gen >= 6) {
3343 /*
3344 * RPS interrupts will get enabled/disabled on demand when RPS
3345 * itself is enabled/disabled.
3346 */
3347 if (HAS_VEBOX(dev))
3348 pm_irqs |= PM_VEBOX_USER_INTERRUPT;
3349
3350 dev_priv->pm_irq_mask = 0xffffffff;
3351 GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs);
3352 }
3353 }
3354
3355 static int ironlake_irq_postinstall(struct drm_device *dev)
3356 {
3357 struct drm_i915_private *dev_priv = dev->dev_private;
3358 u32 display_mask, extra_mask;
3359
3360 if (INTEL_INFO(dev)->gen >= 7) {
3361 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
3362 DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
3363 DE_PLANEB_FLIP_DONE_IVB |
3364 DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
3365 extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
3366 DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
3367 } else {
3368 display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
3369 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
3370 DE_AUX_CHANNEL_A |
3371 DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
3372 DE_POISON);
3373 extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
3374 DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
3375 }
3376
3377 dev_priv->irq_mask = ~display_mask;
3378
3379 I915_WRITE(HWSTAM, 0xeffe);
3380
3381 ibx_irq_pre_postinstall(dev);
3382
3383 GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask);
3384
3385 gen5_gt_irq_postinstall(dev);
3386
3387 ibx_irq_postinstall(dev);
3388
3389 if (IS_IRONLAKE_M(dev)) {
3390 /* Enable PCU event interrupts
3391 *
3392 * spinlocking not required here for correctness since interrupt
3393 * setup is guaranteed to run in single-threaded context. But we
3394 * need it to make the assert_spin_locked happy. */
3395 spin_lock_irq(&dev_priv->irq_lock);
3396 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
3397 spin_unlock_irq(&dev_priv->irq_lock);
3398 }
3399
3400 return 0;
3401 }
3402
3403 static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv)
3404 {
3405 u32 pipestat_mask;
3406 u32 iir_mask;
3407 enum pipe pipe;
3408
3409 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3410 PIPE_FIFO_UNDERRUN_STATUS;
3411
3412 for_each_pipe(dev_priv, pipe)
3413 I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3414 POSTING_READ(PIPESTAT(PIPE_A));
3415
3416 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3417 PIPE_CRC_DONE_INTERRUPT_STATUS;
3418
3419 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3420 for_each_pipe(dev_priv, pipe)
3421 i915_enable_pipestat(dev_priv, pipe, pipestat_mask);
3422
3423 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3424 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3425 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3426 if (IS_CHERRYVIEW(dev_priv))
3427 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3428 dev_priv->irq_mask &= ~iir_mask;
3429
3430 I915_WRITE(VLV_IIR, iir_mask);
3431 I915_WRITE(VLV_IIR, iir_mask);
3432 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3433 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3434 POSTING_READ(VLV_IMR);
3435 }
3436
3437 static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv)
3438 {
3439 u32 pipestat_mask;
3440 u32 iir_mask;
3441 enum pipe pipe;
3442
3443 iir_mask = I915_DISPLAY_PORT_INTERRUPT |
3444 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3445 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT;
3446 if (IS_CHERRYVIEW(dev_priv))
3447 iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT;
3448
3449 dev_priv->irq_mask |= iir_mask;
3450 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3451 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3452 I915_WRITE(VLV_IIR, iir_mask);
3453 I915_WRITE(VLV_IIR, iir_mask);
3454 POSTING_READ(VLV_IIR);
3455
3456 pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV |
3457 PIPE_CRC_DONE_INTERRUPT_STATUS;
3458
3459 i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
3460 for_each_pipe(dev_priv, pipe)
3461 i915_disable_pipestat(dev_priv, pipe, pipestat_mask);
3462
3463 pipestat_mask = PIPESTAT_INT_STATUS_MASK |
3464 PIPE_FIFO_UNDERRUN_STATUS;
3465
3466 for_each_pipe(dev_priv, pipe)
3467 I915_WRITE(PIPESTAT(pipe), pipestat_mask);
3468 POSTING_READ(PIPESTAT(PIPE_A));
3469 }
3470
3471 void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv)
3472 {
3473 assert_spin_locked(&dev_priv->irq_lock);
3474
3475 if (dev_priv->display_irqs_enabled)
3476 return;
3477
3478 dev_priv->display_irqs_enabled = true;
3479
3480 if (intel_irqs_enabled(dev_priv))
3481 valleyview_display_irqs_install(dev_priv);
3482 }
3483
3484 void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv)
3485 {
3486 assert_spin_locked(&dev_priv->irq_lock);
3487
3488 if (!dev_priv->display_irqs_enabled)
3489 return;
3490
3491 dev_priv->display_irqs_enabled = false;
3492
3493 if (intel_irqs_enabled(dev_priv))
3494 valleyview_display_irqs_uninstall(dev_priv);
3495 }
3496
3497 static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
3498 {
3499 dev_priv->irq_mask = ~0;
3500
3501 I915_WRITE(PORT_HOTPLUG_EN, 0);
3502 POSTING_READ(PORT_HOTPLUG_EN);
3503
3504 I915_WRITE(VLV_IIR, 0xffffffff);
3505 I915_WRITE(VLV_IIR, 0xffffffff);
3506 I915_WRITE(VLV_IER, ~dev_priv->irq_mask);
3507 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
3508 POSTING_READ(VLV_IMR);
3509
3510 /* Interrupt setup is already guaranteed to be single-threaded, this is
3511 * just to make the assert_spin_locked check happy. */
3512 spin_lock_irq(&dev_priv->irq_lock);
3513 if (dev_priv->display_irqs_enabled)
3514 valleyview_display_irqs_install(dev_priv);
3515 spin_unlock_irq(&dev_priv->irq_lock);
3516 }
3517
3518 static int valleyview_irq_postinstall(struct drm_device *dev)
3519 {
3520 struct drm_i915_private *dev_priv = dev->dev_private;
3521
3522 vlv_display_irq_postinstall(dev_priv);
3523
3524 gen5_gt_irq_postinstall(dev);
3525
3526 /* ack & enable invalid PTE error interrupts */
3527 #if 0 /* FIXME: add support to irq handler for checking these bits */
3528 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
3529 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
3530 #endif
3531
3532 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
3533
3534 return 0;
3535 }
3536
3537 static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
3538 {
3539 /* These are interrupts we'll toggle with the ring mask register */
3540 uint32_t gt_interrupts[] = {
3541 GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3542 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
3543 GT_RENDER_L3_PARITY_ERROR_INTERRUPT |
3544 GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
3545 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT,
3546 GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3547 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
3548 GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT |
3549 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT,
3550 0,
3551 GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
3552 GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT
3553 };
3554
3555 dev_priv->pm_irq_mask = 0xffffffff;
3556 GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
3557 GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
3558 /*
3559 * RPS interrupts will get enabled/disabled on demand when RPS itself
3560 * is enabled/disabled.
3561 */
3562 GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
3563 GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
3564 }
3565
3566 static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
3567 {
3568 uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE;
3569 uint32_t de_pipe_enables;
3570 int pipe;
3571 u32 aux_en = GEN8_AUX_CHANNEL_A;
3572
3573 if (IS_GEN9(dev_priv)) {
3574 de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE |
3575 GEN9_DE_PIPE_IRQ_FAULT_ERRORS;
3576 aux_en |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C |
3577 GEN9_AUX_CHANNEL_D;
3578 } else
3579 de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE |
3580 GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
3581
3582 de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
3583 GEN8_PIPE_FIFO_UNDERRUN;
3584
3585 dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
3586 dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
3587 dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked;
3588
3589 for_each_pipe(dev_priv, pipe)
3590 if (intel_display_power_is_enabled(dev_priv,
3591 POWER_DOMAIN_PIPE(pipe)))
3592 GEN8_IRQ_INIT_NDX(DE_PIPE, pipe,
3593 dev_priv->de_irq_mask[pipe],
3594 de_pipe_enables);
3595
3596 GEN5_IRQ_INIT(GEN8_DE_PORT_, ~aux_en, aux_en);
3597 }
3598
3599 static int gen8_irq_postinstall(struct drm_device *dev)
3600 {
3601 struct drm_i915_private *dev_priv = dev->dev_private;
3602
3603 ibx_irq_pre_postinstall(dev);
3604
3605 gen8_gt_irq_postinstall(dev_priv);
3606 gen8_de_irq_postinstall(dev_priv);
3607
3608 ibx_irq_postinstall(dev);
3609
3610 I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL);
3611 POSTING_READ(GEN8_MASTER_IRQ);
3612
3613 return 0;
3614 }
3615
3616 static int cherryview_irq_postinstall(struct drm_device *dev)
3617 {
3618 struct drm_i915_private *dev_priv = dev->dev_private;
3619
3620 vlv_display_irq_postinstall(dev_priv);
3621
3622 gen8_gt_irq_postinstall(dev_priv);
3623
3624 I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE);
3625 POSTING_READ(GEN8_MASTER_IRQ);
3626
3627 return 0;
3628 }
3629
3630 static void gen8_irq_uninstall(struct drm_device *dev)
3631 {
3632 struct drm_i915_private *dev_priv = dev->dev_private;
3633
3634 if (!dev_priv)
3635 return;
3636
3637 gen8_irq_reset(dev);
3638 }
3639
3640 static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
3641 {
3642 /* Interrupt setup is already guaranteed to be single-threaded, this is
3643 * just to make the assert_spin_locked check happy. */
3644 spin_lock_irq(&dev_priv->irq_lock);
3645 if (dev_priv->display_irqs_enabled)
3646 valleyview_display_irqs_uninstall(dev_priv);
3647 spin_unlock_irq(&dev_priv->irq_lock);
3648
3649 vlv_display_irq_reset(dev_priv);
3650
3651 dev_priv->irq_mask = ~0;
3652 }
3653
3654 static void valleyview_irq_uninstall(struct drm_device *dev)
3655 {
3656 struct drm_i915_private *dev_priv = dev->dev_private;
3657
3658 if (!dev_priv)
3659 return;
3660
3661 I915_WRITE(VLV_MASTER_IER, 0);
3662
3663 gen5_gt_irq_reset(dev);
3664
3665 I915_WRITE(HWSTAM, 0xffffffff);
3666
3667 vlv_display_irq_uninstall(dev_priv);
3668 }
3669
3670 static void cherryview_irq_uninstall(struct drm_device *dev)
3671 {
3672 struct drm_i915_private *dev_priv = dev->dev_private;
3673
3674 if (!dev_priv)
3675 return;
3676
3677 I915_WRITE(GEN8_MASTER_IRQ, 0);
3678 POSTING_READ(GEN8_MASTER_IRQ);
3679
3680 gen8_gt_irq_reset(dev_priv);
3681
3682 GEN5_IRQ_RESET(GEN8_PCU_);
3683
3684 vlv_display_irq_uninstall(dev_priv);
3685 }
3686
3687 static void ironlake_irq_uninstall(struct drm_device *dev)
3688 {
3689 struct drm_i915_private *dev_priv = dev->dev_private;
3690
3691 if (!dev_priv)
3692 return;
3693
3694 ironlake_irq_reset(dev);
3695 }
3696
3697 static void i8xx_irq_preinstall(struct drm_device *dev)
3698 {
3699 struct drm_i915_private *dev_priv = dev->dev_private;
3700 int pipe;
3701
3702 for_each_pipe(dev_priv, pipe)
3703 I915_WRITE(PIPESTAT(pipe), 0);
3704 I915_WRITE16(IMR, 0xffff);
3705 I915_WRITE16(IER, 0x0);
3706 POSTING_READ16(IER);
3707 }
3708
3709 static int i8xx_irq_postinstall(struct drm_device *dev)
3710 {
3711 struct drm_i915_private *dev_priv = dev->dev_private;
3712
3713 I915_WRITE16(EMR,
3714 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3715
3716 /* Unmask the interrupts that we always want on. */
3717 dev_priv->irq_mask =
3718 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3719 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3720 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3721 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3722 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3723 I915_WRITE16(IMR, dev_priv->irq_mask);
3724
3725 I915_WRITE16(IER,
3726 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3727 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3728 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
3729 I915_USER_INTERRUPT);
3730 POSTING_READ16(IER);
3731
3732 /* Interrupt setup is already guaranteed to be single-threaded; this is
3733 * just to make the assert_spin_locked check happy. */
3734 spin_lock_irq(&dev_priv->irq_lock);
3735 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3736 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3737 spin_unlock_irq(&dev_priv->irq_lock);
3738
3739 return 0;
3740 }
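/*
 * Register roles in the sequence above, simplified (editorial summary;
 * see Bspec for the authoritative flow):
 *
 *	EMR - error mask: a 0 bit lets that error source be reported, so
 *	      ~(PAGE_TABLE | MEMORY_REFRESH) enables exactly those two
 *	IMR - interrupt mask: events set here never latch into IIR
 *	IER - interrupt enable: gates which latched IIR events actually
 *	      raise the IRQ line
 */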
3741
3742 /*
3743 * Returns true when a page flip has completed.
3744 */
3745 static bool i8xx_handle_vblank(struct drm_device *dev,
3746 int plane, int pipe, u32 iir)
3747 {
3748 struct drm_i915_private *dev_priv = dev->dev_private;
3749 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3750
3751 if (!intel_pipe_handle_vblank(dev, pipe))
3752 return false;
3753
3754 if ((iir & flip_pending) == 0)
3755 goto check_page_flip;
3756
3757 /* We detect FlipDone by looking for the change in PendingFlip from '1'
3758 * to '0' on the following vblank, i.e. IIR has the PendingFlip
3759 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3760 * the flip is completed (no longer pending). Since this doesn't raise
3761 * an interrupt per se, we watch for the change at vblank.
3762 */
3763 if (I915_READ16(ISR) & flip_pending)
3764 goto check_page_flip;
3765
3766 intel_prepare_page_flip(dev, plane);
3767 intel_finish_page_flip(dev, pipe);
3768 return true;
3769
3770 check_page_flip:
3771 intel_check_page_flip(dev, pipe);
3772 return false;
3773 }
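/*
 * Worked example of the FlipDone heuristic above (editorial, informal):
 *
 *	flip queued (MI_DISPLAY_FLIP):	ISR bit = 1, IIR latches a 1
 *	vblank, flip still pending:	ISR bit = 1 -> intel_check_page_flip()
 *	vblank, flip completed:		ISR bit = 0 while IIR still holds the
 *					latched 1 -> finish the page flip
 */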
3774
3775 static irqreturn_t i8xx_irq_handler(int irq, void *arg)
3776 {
3777 struct drm_device *dev = arg;
3778 struct drm_i915_private *dev_priv = dev->dev_private;
3779 u16 iir, new_iir;
3780 u32 pipe_stats[2];
3781 int pipe;
3782 u16 flip_mask =
3783 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3784 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3785
3786 if (!intel_irqs_enabled(dev_priv))
3787 return IRQ_NONE;
3788
3789 iir = I915_READ16(IIR);
3790 if (iir == 0)
3791 return IRQ_NONE;
3792
3793 while (iir & ~flip_mask) {
3794 /* Can't rely on pipestat interrupt bit in iir as it might
3795 * have been cleared after the pipestat interrupt was received.
3796 * It doesn't set the bit in iir again, but it still produces
3797 * interrupts (for non-MSI).
3798 */
3799 spin_lock(&dev_priv->irq_lock);
3800 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3801 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3802
3803 for_each_pipe(dev_priv, pipe) {
3804 int reg = PIPESTAT(pipe);
3805 pipe_stats[pipe] = I915_READ(reg);
3806
3807 /*
3808 * Clear the PIPE*STAT regs before the IIR
3809 */
3810 if (pipe_stats[pipe] & 0x8000ffff)
3811 I915_WRITE(reg, pipe_stats[pipe]);
3812 }
3813 spin_unlock(&dev_priv->irq_lock);
3814
3815 I915_WRITE16(IIR, iir & ~flip_mask);
3816 new_iir = I915_READ16(IIR); /* Flush posted writes */
3817
3818 if (iir & I915_USER_INTERRUPT)
3819 notify_ring(dev, &dev_priv->ring[RCS]);
3820
3821 for_each_pipe(dev_priv, pipe) {
3822 int plane = pipe;
3823 if (HAS_FBC(dev))
3824 plane = !plane;
3825
3826 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
3827 i8xx_handle_vblank(dev, plane, pipe, iir))
3828 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
3829
3830 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
3831 i9xx_pipe_crc_irq_handler(dev, pipe);
3832
3833 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3834 intel_cpu_fifo_underrun_irq_handler(dev_priv,
3835 pipe);
3836 }
3837
3838 iir = new_iir;
3839 }
3840
3841 return IRQ_HANDLED;
3842 }
3843
3844 static void i8xx_irq_uninstall(struct drm_device *dev)
3845 {
3846 struct drm_i915_private *dev_priv = dev->dev_private;
3847 int pipe;
3848
3849 for_each_pipe(dev_priv, pipe) {
3850 /* Clear enable bits; then clear status bits */
3851 I915_WRITE(PIPESTAT(pipe), 0);
3852 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3853 }
3854 I915_WRITE16(IMR, 0xffff);
3855 I915_WRITE16(IER, 0x0);
3856 I915_WRITE16(IIR, I915_READ16(IIR));
3857 }
3858
3859 static void i915_irq_preinstall(struct drm_device *dev)
3860 {
3861 struct drm_i915_private *dev_priv = dev->dev_private;
3862 int pipe;
3863
3864 if (I915_HAS_HOTPLUG(dev)) {
3865 I915_WRITE(PORT_HOTPLUG_EN, 0);
3866 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3867 }
3868
3869 I915_WRITE16(HWSTAM, 0xeffe);
3870 for_each_pipe(dev_priv, pipe)
3871 I915_WRITE(PIPESTAT(pipe), 0);
3872 I915_WRITE(IMR, 0xffffffff);
3873 I915_WRITE(IER, 0x0);
3874 POSTING_READ(IER);
3875 }
3876
3877 static int i915_irq_postinstall(struct drm_device *dev)
3878 {
3879 struct drm_i915_private *dev_priv = dev->dev_private;
3880 u32 enable_mask;
3881
3882 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3883
3884 /* Unmask the interrupts that we always want on. */
3885 dev_priv->irq_mask =
3886 ~(I915_ASLE_INTERRUPT |
3887 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3888 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3889 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3890 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3891 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3892
3893 enable_mask =
3894 I915_ASLE_INTERRUPT |
3895 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3896 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3897 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
3898 I915_USER_INTERRUPT;
3899
3900 if (I915_HAS_HOTPLUG(dev)) {
3901 I915_WRITE(PORT_HOTPLUG_EN, 0);
3902 POSTING_READ(PORT_HOTPLUG_EN);
3903
3904 /* Enable in IER... */
3905 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
3906 /* and unmask in IMR */
3907 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
3908 }
3909
3910 I915_WRITE(IMR, dev_priv->irq_mask);
3911 I915_WRITE(IER, enable_mask);
3912 POSTING_READ(IER);
3913
3914 i915_enable_asle_pipestat(dev);
3915
3916 /* Interrupt setup is already guaranteed to be single-threaded; this is
3917 * just to make the assert_spin_locked check happy. */
3918 spin_lock_irq(&dev_priv->irq_lock);
3919 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
3920 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
3921 spin_unlock_irq(&dev_priv->irq_lock);
3922
3923 return 0;
3924 }
3925
3926 /*
3927 * Returns true when a page flip has completed.
3928 */
3929 static bool i915_handle_vblank(struct drm_device *dev,
3930 int plane, int pipe, u32 iir)
3931 {
3932 struct drm_i915_private *dev_priv = dev->dev_private;
3933 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3934
3935 if (!intel_pipe_handle_vblank(dev, pipe))
3936 return false;
3937
3938 if ((iir & flip_pending) == 0)
3939 goto check_page_flip;
3940
3941 /* We detect FlipDone by looking for the change in PendingFlip from '1'
3942 * to '0' on the following vblank, i.e. IIR has the PendingFlip
3943 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3944 * the flip is completed (no longer pending). Since this doesn't raise
3945 * an interrupt per se, we watch for the change at vblank.
3946 */
3947 if (I915_READ(ISR) & flip_pending)
3948 goto check_page_flip;
3949
3950 intel_prepare_page_flip(dev, plane);
3951 intel_finish_page_flip(dev, pipe);
3952 return true;
3953
3954 check_page_flip:
3955 intel_check_page_flip(dev, pipe);
3956 return false;
3957 }
3958
3959 static irqreturn_t i915_irq_handler(int irq, void *arg)
3960 {
3961 struct drm_device *dev = arg;
3962 struct drm_i915_private *dev_priv = dev->dev_private;
3963 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
3964 u32 flip_mask =
3965 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3966 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3967 int pipe, ret = IRQ_NONE;
3968
3969 if (!intel_irqs_enabled(dev_priv))
3970 return IRQ_NONE;
3971
3972 iir = I915_READ(IIR);
3973 do {
3974 bool irq_received = (iir & ~flip_mask) != 0;
3975 bool blc_event = false;
3976
3977 /* Can't rely on pipestat interrupt bit in iir as it might
3978 * have been cleared after the pipestat interrupt was received.
3979 * It doesn't set the bit in iir again, but it still produces
3980 * interrupts (for non-MSI).
3981 */
3982 spin_lock(&dev_priv->irq_lock);
3983 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3984 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
3985
3986 for_each_pipe(dev_priv, pipe) {
3987 int reg = PIPESTAT(pipe);
3988 pipe_stats[pipe] = I915_READ(reg);
3989
3990 /* Clear the PIPE*STAT regs before the IIR */
3991 if (pipe_stats[pipe] & 0x8000ffff) {
3992 I915_WRITE(reg, pipe_stats[pipe]);
3993 irq_received = true;
3994 }
3995 }
3996 spin_unlock(&dev_priv->irq_lock);
3997
3998 if (!irq_received)
3999 break;
4000
4001 /* Consume port. Then clear IIR or we'll miss events */
4002 if (I915_HAS_HOTPLUG(dev) &&
4003 iir & I915_DISPLAY_PORT_INTERRUPT)
4004 i9xx_hpd_irq_handler(dev);
4005
4006 I915_WRITE(IIR, iir & ~flip_mask);
4007 new_iir = I915_READ(IIR); /* Flush posted writes */
4008
4009 if (iir & I915_USER_INTERRUPT)
4010 notify_ring(dev, &dev_priv->ring[RCS]);
4011
4012 for_each_pipe(dev_priv, pipe) {
4013 int plane = pipe;
4014 if (HAS_FBC(dev))
4015 plane = !plane;
4016
4017 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
4018 i915_handle_vblank(dev, plane, pipe, iir))
4019 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
4020
4021 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4022 blc_event = true;
4023
4024 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4025 i9xx_pipe_crc_irq_handler(dev, pipe);
4026
4027 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4028 intel_cpu_fifo_underrun_irq_handler(dev_priv,
4029 pipe);
4030 }
4031
4032 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4033 intel_opregion_asle_intr(dev);
4034
4035 /* With MSI, interrupts are only generated when iir
4036 * transitions from zero to nonzero. If another bit got
4037 * set while we were handling the existing iir bits, then
4038 * we would never get another interrupt.
4039 *
4040 * This is fine on non-MSI as well, as if we hit this path
4041 * we avoid exiting the interrupt handler only to generate
4042 * another one.
4043 *
4044 * Note that for MSI this could cause a stray interrupt report
4045 * if an interrupt landed in the time between writing IIR and
4046 * the posting read. This should be rare enough to never
4047 * trigger the 99% of 100,000 interrupts test for disabling
4048 * stray interrupts.
4049 */
4050 ret = IRQ_HANDLED;
4051 iir = new_iir;
4052 } while (iir & ~flip_mask);
4053
4054 return ret;
4055 }
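/*
 * The IIR handling pattern above, in miniature (editorial sketch with the
 * flip_mask details dropped):
 *
 *	iir = I915_READ(IIR);
 *	do {
 *		I915_WRITE(IIR, iir);		clear what we will handle
 *		new_iir = I915_READ(IIR);	pick up anything that raced in
 *		... handle iir ...
 *		iir = new_iir;
 *	} while (iir);
 *
 * With MSI the device only fires on a 0 -> nonzero IIR transition, so
 * looping until IIR reads back as zero is what rearms the interrupt.
 */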
4056
4057 static void i915_irq_uninstall(struct drm_device *dev)
4058 {
4059 struct drm_i915_private *dev_priv = dev->dev_private;
4060 int pipe;
4061
4062 if (I915_HAS_HOTPLUG(dev)) {
4063 I915_WRITE(PORT_HOTPLUG_EN, 0);
4064 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4065 }
4066
4067 I915_WRITE16(HWSTAM, 0xffff);
4068 for_each_pipe(dev_priv, pipe) {
4069 /* Clear enable bits; then clear status bits */
4070 I915_WRITE(PIPESTAT(pipe), 0);
4071 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
4072 }
4073 I915_WRITE(IMR, 0xffffffff);
4074 I915_WRITE(IER, 0x0);
4075
4076 I915_WRITE(IIR, I915_READ(IIR));
4077 }
4078
4079 static void i965_irq_preinstall(struct drm_device *dev)
4080 {
4081 struct drm_i915_private *dev_priv = dev->dev_private;
4082 int pipe;
4083
4084 I915_WRITE(PORT_HOTPLUG_EN, 0);
4085 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4086
4087 I915_WRITE(HWSTAM, 0xeffe);
4088 for_each_pipe(dev_priv, pipe)
4089 I915_WRITE(PIPESTAT(pipe), 0);
4090 I915_WRITE(IMR, 0xffffffff);
4091 I915_WRITE(IER, 0x0);
4092 POSTING_READ(IER);
4093 }
4094
4095 static int i965_irq_postinstall(struct drm_device *dev)
4096 {
4097 struct drm_i915_private *dev_priv = dev->dev_private;
4098 u32 enable_mask;
4099 u32 error_mask;
4100
4101 /* Unmask the interrupts that we always want on. */
4102 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
4103 I915_DISPLAY_PORT_INTERRUPT |
4104 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
4105 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
4106 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4107 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
4108 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
4109
4110 enable_mask = ~dev_priv->irq_mask;
4111 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4112 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
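/*
 * Editorial note: the flip-pending bits stay unmasked in IMR but are
 * deliberately left out of IER, so they remain visible in IIR/ISR for
 * the FlipDone check in i915_handle_vblank() without generating
 * interrupts of their own.
 */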
4113 enable_mask |= I915_USER_INTERRUPT;
4114
4115 if (IS_G4X(dev))
4116 enable_mask |= I915_BSD_USER_INTERRUPT;
4117
4118 /* Interrupt setup is already guaranteed to be single-threaded; this is
4119 * just to make the assert_spin_locked check happy. */
4120 spin_lock_irq(&dev_priv->irq_lock);
4121 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS);
4122 i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS);
4123 i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS);
4124 spin_unlock_irq(&dev_priv->irq_lock);
4125
4126 /*
4127 * Enable some error detection, note the instruction error mask
4128 * bit is reserved, so we leave it masked.
4129 */
4130 if (IS_G4X(dev)) {
4131 error_mask = ~(GM45_ERROR_PAGE_TABLE |
4132 GM45_ERROR_MEM_PRIV |
4133 GM45_ERROR_CP_PRIV |
4134 I915_ERROR_MEMORY_REFRESH);
4135 } else {
4136 error_mask = ~(I915_ERROR_PAGE_TABLE |
4137 I915_ERROR_MEMORY_REFRESH);
4138 }
4139 I915_WRITE(EMR, error_mask);
4140
4141 I915_WRITE(IMR, dev_priv->irq_mask);
4142 I915_WRITE(IER, enable_mask);
4143 POSTING_READ(IER);
4144
4145 I915_WRITE(PORT_HOTPLUG_EN, 0);
4146 POSTING_READ(PORT_HOTPLUG_EN);
4147
4148 i915_enable_asle_pipestat(dev);
4149
4150 return 0;
4151 }
4152
4153 static void i915_hpd_irq_setup(struct drm_device *dev)
4154 {
4155 struct drm_i915_private *dev_priv = dev->dev_private;
4156 struct intel_encoder *intel_encoder;
4157 u32 hotplug_en;
4158
4159 assert_spin_locked(&dev_priv->irq_lock);
4160
4161 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
4162 hotplug_en &= ~HOTPLUG_INT_EN_MASK;
4163 /* Note HDMI and DP share hotplug bits */
4164 /* enable bits are the same for all generations */
4165 for_each_intel_encoder(dev, intel_encoder)
4166 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
4167 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
4168 /* Programming the CRT detection parameters tends
4169 * to generate a spurious hotplug event about three
4170 * seconds later. So just do it once.
4171 */
4172 if (IS_G4X(dev))
4173 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
4174 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
4175 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
4176
4177 /* Ignore TV since it's buggy */
4178 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
4179 }
4180
4181 static irqreturn_t i965_irq_handler(int irq, void *arg)
4182 {
4183 struct drm_device *dev = arg;
4184 struct drm_i915_private *dev_priv = dev->dev_private;
4185 u32 iir, new_iir;
4186 u32 pipe_stats[I915_MAX_PIPES];
4187 int ret = IRQ_NONE, pipe;
4188 u32 flip_mask =
4189 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
4190 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
4191
4192 if (!intel_irqs_enabled(dev_priv))
4193 return IRQ_NONE;
4194
4195 iir = I915_READ(IIR);
4196
4197 for (;;) {
4198 bool irq_received = (iir & ~flip_mask) != 0;
4199 bool blc_event = false;
4200
4201 /* Can't rely on pipestat interrupt bit in iir as it might
4202 * have been cleared after the pipestat interrupt was received.
4203 * It doesn't set the bit in iir again, but it still produces
4204 * interrupts (for non-MSI).
4205 */
4206 spin_lock(&dev_priv->irq_lock);
4207 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
4208 DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
4209
4210 for_each_pipe(dev_priv, pipe) {
4211 int reg = PIPESTAT(pipe);
4212 pipe_stats[pipe] = I915_READ(reg);
4213
4214 /*
4215 * Clear the PIPE*STAT regs before the IIR
4216 */
4217 if (pipe_stats[pipe] & 0x8000ffff) {
4218 I915_WRITE(reg, pipe_stats[pipe]);
4219 irq_received = true;
4220 }
4221 }
4222 spin_unlock(&dev_priv->irq_lock);
4223
4224 if (!irq_received)
4225 break;
4226
4227 ret = IRQ_HANDLED;
4228
4229 /* Consume port. Then clear IIR or we'll miss events */
4230 if (iir & I915_DISPLAY_PORT_INTERRUPT)
4231 i9xx_hpd_irq_handler(dev);
4232
4233 I915_WRITE(IIR, iir & ~flip_mask);
4234 new_iir = I915_READ(IIR); /* Flush posted writes */
4235
4236 if (iir & I915_USER_INTERRUPT)
4237 notify_ring(dev, &dev_priv->ring[RCS]);
4238 if (iir & I915_BSD_USER_INTERRUPT)
4239 notify_ring(dev, &dev_priv->ring[VCS]);
4240
4241 for_each_pipe(dev_priv, pipe) {
4242 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
4243 i915_handle_vblank(dev, pipe, pipe, iir))
4244 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
4245
4246 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
4247 blc_event = true;
4248
4249 if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
4250 i9xx_pipe_crc_irq_handler(dev, pipe);
4251
4252 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
4253 intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
4254 }
4255
4256 if (blc_event || (iir & I915_ASLE_INTERRUPT))
4257 intel_opregion_asle_intr(dev);
4258
4259 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
4260 gmbus_irq_handler(dev);
4261
4262 /* With MSI, interrupts are only generated when iir
4263 * transitions from zero to nonzero. If another bit got
4264 * set while we were handling the existing iir bits, then
4265 * we would never get another interrupt.
4266 *
4267 * This is fine on non-MSI as well, as if we hit this path
4268 * we avoid exiting the interrupt handler only to generate
4269 * another one.
4270 *
4271 * Note that for MSI this could cause a stray interrupt report
4272 * if an interrupt landed in the time between writing IIR and
4273 * the posting read. This should be rare enough to never
4274 * trigger the 99% of 100,000 interrupts test for disabling
4275 * stray interrupts.
4276 */
4277 iir = new_iir;
4278 }
4279
4280 return ret;
4281 }
4282
4283 static void i965_irq_uninstall(struct drm_device *dev)
4284 {
4285 struct drm_i915_private *dev_priv = dev->dev_private;
4286 int pipe;
4287
4288 if (!dev_priv)
4289 return;
4290
4291 I915_WRITE(PORT_HOTPLUG_EN, 0);
4292 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
4293
4294 I915_WRITE(HWSTAM, 0xffffffff);
4295 for_each_pipe(dev_priv, pipe)
4296 I915_WRITE(PIPESTAT(pipe), 0);
4297 I915_WRITE(IMR, 0xffffffff);
4298 I915_WRITE(IER, 0x0);
4299
4300 for_each_pipe(dev_priv, pipe)
4301 I915_WRITE(PIPESTAT(pipe),
4302 I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
4303 I915_WRITE(IIR, I915_READ(IIR));
4304 }
4305
4306 static void intel_hpd_irq_reenable_work(struct work_struct *work)
4307 {
4308 struct drm_i915_private *dev_priv =
4309 container_of(work, typeof(*dev_priv),
4310 hotplug_reenable_work.work);
4311 struct drm_device *dev = dev_priv->dev;
4312 struct drm_mode_config *mode_config = &dev->mode_config;
4313 int i;
4314
4315 intel_runtime_pm_get(dev_priv);
4316
4317 spin_lock_irq(&dev_priv->irq_lock);
4318 for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
4319 struct drm_connector *connector;
4320
4321 if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
4322 continue;
4323
4324 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
4325
4326 list_for_each_entry(connector, &mode_config->connector_list, head) {
4327 struct intel_connector *intel_connector = to_intel_connector(connector);
4328
4329 if (intel_connector->encoder->hpd_pin == i) {
4330 if (connector->polled != intel_connector->polled)
4331 DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
4332 connector->name);
4333 connector->polled = intel_connector->polled;
4334 if (!connector->polled)
4335 connector->polled = DRM_CONNECTOR_POLL_HPD;
4336 }
4337 }
4338 }
4339 if (dev_priv->display.hpd_irq_setup)
4340 dev_priv->display.hpd_irq_setup(dev);
4341 spin_unlock_irq(&dev_priv->irq_lock);
4342
4343 intel_runtime_pm_put(dev_priv);
4344 }
4345
4346 /**
4347 * intel_irq_init - initializes irq support
4348 * @dev_priv: i915 device instance
4349 *
4350 * This function initializes all the irq support including work items, timers
4351 * and all the vtables. It does not setup the interrupt itself though.
4352 */
4353 void intel_irq_init(struct drm_i915_private *dev_priv)
4354 {
4355 struct drm_device *dev = dev_priv->dev;
4356
4357 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
4358 INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func);
4359 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
4360 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
4361
4362 /* Let's track the enabled rps events */
4363 if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
4364 /* WaGsvRC0ResidencyMethod:vlv */
4365 dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED;
4366 else
4367 dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS;
4368
4369 INIT_DELAYED_WORK(&dev_priv->gpu_error.hangcheck_work,
4370 i915_hangcheck_elapsed);
4371 INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work,
4372 intel_hpd_irq_reenable_work);
4373
4374 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
4375
4376 if (IS_GEN2(dev_priv)) {
4377 dev->max_vblank_count = 0;
4378 dev->driver->get_vblank_counter = i8xx_get_vblank_counter;
4379 } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) {
4380 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
4381 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
4382 } else {
4383 dev->driver->get_vblank_counter = i915_get_vblank_counter;
4384 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
4385 }
4386
4387 /*
4388 * Opt out of the vblank disable timer on everything except gen2.
4389 * Gen2 doesn't have a hardware frame counter and so depends on
4390 * vblank interrupts to produce sane vblank sequence numbers.
4391 */
4392 if (!IS_GEN2(dev_priv))
4393 dev->vblank_disable_immediate = true;
4394
4395 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
4396 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
4397 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
4398 }
4399
4400 if (IS_CHERRYVIEW(dev_priv)) {
4401 dev->driver->irq_handler = cherryview_irq_handler;
4402 dev->driver->irq_preinstall = cherryview_irq_preinstall;
4403 dev->driver->irq_postinstall = cherryview_irq_postinstall;
4404 dev->driver->irq_uninstall = cherryview_irq_uninstall;
4405 dev->driver->enable_vblank = valleyview_enable_vblank;
4406 dev->driver->disable_vblank = valleyview_disable_vblank;
4407 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4408 } else if (IS_VALLEYVIEW(dev_priv)) {
4409 dev->driver->irq_handler = valleyview_irq_handler;
4410 dev->driver->irq_preinstall = valleyview_irq_preinstall;
4411 dev->driver->irq_postinstall = valleyview_irq_postinstall;
4412 dev->driver->irq_uninstall = valleyview_irq_uninstall;
4413 dev->driver->enable_vblank = valleyview_enable_vblank;
4414 dev->driver->disable_vblank = valleyview_disable_vblank;
4415 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4416 } else if (INTEL_INFO(dev_priv)->gen >= 8) {
4417 dev->driver->irq_handler = gen8_irq_handler;
4418 dev->driver->irq_preinstall = gen8_irq_reset;
4419 dev->driver->irq_postinstall = gen8_irq_postinstall;
4420 dev->driver->irq_uninstall = gen8_irq_uninstall;
4421 dev->driver->enable_vblank = gen8_enable_vblank;
4422 dev->driver->disable_vblank = gen8_disable_vblank;
4423 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
4424 } else if (HAS_PCH_SPLIT(dev)) {
4425 dev->driver->irq_handler = ironlake_irq_handler;
4426 dev->driver->irq_preinstall = ironlake_irq_reset;
4427 dev->driver->irq_postinstall = ironlake_irq_postinstall;
4428 dev->driver->irq_uninstall = ironlake_irq_uninstall;
4429 dev->driver->enable_vblank = ironlake_enable_vblank;
4430 dev->driver->disable_vblank = ironlake_disable_vblank;
4431 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
4432 } else {
4433 if (INTEL_INFO(dev_priv)->gen == 2) {
4434 dev->driver->irq_preinstall = i8xx_irq_preinstall;
4435 dev->driver->irq_postinstall = i8xx_irq_postinstall;
4436 dev->driver->irq_handler = i8xx_irq_handler;
4437 dev->driver->irq_uninstall = i8xx_irq_uninstall;
4438 } else if (INTEL_INFO(dev_priv)->gen == 3) {
4439 dev->driver->irq_preinstall = i915_irq_preinstall;
4440 dev->driver->irq_postinstall = i915_irq_postinstall;
4441 dev->driver->irq_uninstall = i915_irq_uninstall;
4442 dev->driver->irq_handler = i915_irq_handler;
4443 } else {
4444 dev->driver->irq_preinstall = i965_irq_preinstall;
4445 dev->driver->irq_postinstall = i965_irq_postinstall;
4446 dev->driver->irq_uninstall = i965_irq_uninstall;
4447 dev->driver->irq_handler = i965_irq_handler;
4448 }
4449 if (I915_HAS_HOTPLUG(dev_priv))
4450 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4451 dev->driver->enable_vblank = i915_enable_vblank;
4452 dev->driver->disable_vblank = i915_disable_vblank;
4453 }
4454 }
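/*
 * Typical ordering at driver load time (editorial sketch only; the real
 * call sites live in the driver load code and do more than this):
 *
 *	intel_irq_init(dev_priv);		vtables, work items, timers
 *	ret = intel_irq_install(dev_priv);	enable the hardware interrupt
 *	if (ret)
 *		goto err;
 *	intel_hpd_init(dev_priv);		only now enable hotplug support
 */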
4455
4456 /**
4457 * intel_hpd_init - initializes and enables hpd support
4458 * @dev_priv: i915 device instance
4459 *
4460 * This function enables the hotplug support. It requires that interrupts have
4461 * already been enabled with intel_irq_install(). From this point on hotplug and
4462 * poll requests can run concurrently with other code, so locking rules must be
4463 * obeyed.
4464 *
4465 * This is a separate step from interrupt enabling to simplify the locking rules
4466 * in the driver load and resume code.
4467 */
4468 void intel_hpd_init(struct drm_i915_private *dev_priv)
4469 {
4470 struct drm_device *dev = dev_priv->dev;
4471 struct drm_mode_config *mode_config = &dev->mode_config;
4472 struct drm_connector *connector;
4473 int i;
4474
4475 for (i = 1; i < HPD_NUM_PINS; i++) {
4476 dev_priv->hpd_stats[i].hpd_cnt = 0;
4477 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
4478 }
4479 list_for_each_entry(connector, &mode_config->connector_list, head) {
4480 struct intel_connector *intel_connector = to_intel_connector(connector);
4481 connector->polled = intel_connector->polled;
4482 if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
4483 connector->polled = DRM_CONNECTOR_POLL_HPD;
4484 if (intel_connector->mst_port)
4485 connector->polled = DRM_CONNECTOR_POLL_HPD;
4486 }
4487
4488 /* Interrupt setup is already guaranteed to be single-threaded; this is
4489 * just to make the assert_spin_locked checks happy. */
4490 spin_lock_irq(&dev_priv->irq_lock);
4491 if (dev_priv->display.hpd_irq_setup)
4492 dev_priv->display.hpd_irq_setup(dev);
4493 spin_unlock_irq(&dev_priv->irq_lock);
4494 }
4495
4496 /**
4497 * intel_irq_install - enables the hardware interrupt
4498 * @dev_priv: i915 device instance
4499 *
4500 * This function enables the hardware interrupt handling, but leaves the hotplug
4501 * handling disabled. It is called after intel_irq_init().
4502 *
4503 * In the driver load and resume code we need working interrupts in a few places
4504 * but don't want to deal with the hassle of concurrent probe and hotplug
4505 * workers. Hence the split into this two-stage approach.
4506 */
4507 int intel_irq_install(struct drm_i915_private *dev_priv)
4508 {
4509 /*
4510 * We enable some interrupt sources in our postinstall hooks, so mark
4511 * interrupts as enabled _before_ actually enabling them to avoid
4512 * special cases in our ordering checks.
4513 */
4514 dev_priv->pm.irqs_enabled = true;
4515
4516 return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq);
4517 }
4518
4519 /**
4520 * intel_irq_uninstall - finalizes all irq handling
4521 * @dev_priv: i915 device instance
4522 *
4523 * This stops interrupt and hotplug handling and unregisters and frees all
4524 * resources acquired in the init functions.
4525 */
4526 void intel_irq_uninstall(struct drm_i915_private *dev_priv)
4527 {
4528 drm_irq_uninstall(dev_priv->dev);
4529 intel_hpd_cancel_work(dev_priv);
4530 dev_priv->pm.irqs_enabled = false;
4531 }
4532
4533 /**
4534 * intel_runtime_pm_disable_interrupts - runtime interrupt disabling
4535 * @dev_priv: i915 device instance
4536 *
4537 * This function is used to disable interrupts at runtime, both in the runtime
4538 * pm and the system suspend/resume code.
4539 */
4540 void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv)
4541 {
4542 dev_priv->dev->driver->irq_uninstall(dev_priv->dev);
4543 dev_priv->pm.irqs_enabled = false;
4544 synchronize_irq(dev_priv->dev->irq);
4545 }
4546
4547 /**
4548 * intel_runtime_pm_enable_interrupts - runtime interrupt enabling
4549 * @dev_priv: i915 device instance
4550 *
4551 * This function is used to enable interrupts at runtime, both in the runtime
4552 * pm and the system suspend/resume code.
4553 */
4554 void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv)
4555 {
4556 dev_priv->pm.irqs_enabled = true;
4557 dev_priv->dev->driver->irq_preinstall(dev_priv->dev);
4558 dev_priv->dev->driver->irq_postinstall(dev_priv->dev);
4559 }
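/*
 * Illustrative pairing in a runtime pm or system suspend path (editorial
 * sketch; the actual callers do considerably more between these calls):
 *
 *	intel_runtime_pm_disable_interrupts(dev_priv);
 *	... save state, power down ...
 *	... power up, restore state ...
 *	intel_runtime_pm_enable_interrupts(dev_priv);
 */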