/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: interrupt handling
 *
 * These functions provide the basic support for enabling and disabling the
 * interrupt handling support. There's a lot more functionality in i915_irq.c
 * and related files, but that will be described in separate chapters.
 */

static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_g4x[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};
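
/*
 * Note (illustrative, not from the original source): the tables above map
 * an hpd pin (enum hpd_pin) to that platform's hotplug interrupt bit, e.g.
 * hpd_ibx[HPD_PORT_B] yields SDE_PORTB_HOTPLUG, so handlers can translate
 * a hardware status word back into pins with a simple table lookup.
 */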

/* IIR can theoretically queue up two events. Be paranoid. */
#define GEN8_IRQ_RESET_NDX(type, which) do { \
	I915_WRITE(GEN8_##type##_IMR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), 0); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IIR(which), 0xffffffff); \
	POSTING_READ(GEN8_##type##_IIR(which)); \
} while (0)

#define GEN5_IRQ_RESET(type) do { \
	I915_WRITE(type##IMR, 0xffffffff); \
	POSTING_READ(type##IMR); \
	I915_WRITE(type##IER, 0); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
	I915_WRITE(type##IIR, 0xffffffff); \
	POSTING_READ(type##IIR); \
} while (0)
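
/*
 * Illustrative expansion, not part of the driver: GEN5_IRQ_RESET(DE)
 * becomes roughly
 *
 *	I915_WRITE(DEIMR, 0xffffffff);
 *	POSTING_READ(DEIMR);
 *	I915_WRITE(DEIER, 0);
 *	I915_WRITE(DEIIR, 0xffffffff);
 *	POSTING_READ(DEIIR);
 *	I915_WRITE(DEIIR, 0xffffffff);
 *	POSTING_READ(DEIIR);
 *
 * i.e. mask everything, disable everything, then ack IIR twice since the
 * hardware can latch a second event behind the one currently visible.
 */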

/*
 * We should clear IMR at preinstall/uninstall, and just check at postinstall.
 */
#define GEN5_ASSERT_IIR_IS_ZERO(reg) do { \
	u32 val = I915_READ(reg); \
	if (val) { \
		WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n", \
		     (reg), val); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
		I915_WRITE((reg), 0xffffffff); \
		POSTING_READ(reg); \
	} \
} while (0)

#define GEN8_IRQ_INIT_NDX(type, which, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(GEN8_##type##_IIR(which)); \
	I915_WRITE(GEN8_##type##_IER(which), (ier_val)); \
	I915_WRITE(GEN8_##type##_IMR(which), (imr_val)); \
	POSTING_READ(GEN8_##type##_IMR(which)); \
} while (0)

#define GEN5_IRQ_INIT(type, imr_val, ier_val) do { \
	GEN5_ASSERT_IIR_IS_ZERO(type##IIR); \
	I915_WRITE(type##IER, (ier_val)); \
	I915_WRITE(type##IMR, (imr_val)); \
	POSTING_READ(type##IMR); \
} while (0)
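
/*
 * Usage sketch (illustrative): GEN5_IRQ_INIT(GT, imr_val, ier_val) first
 * asserts GTIIR holds no stale events, writes GTIER, and only then
 * programs GTIMR, with a posting read so the mask setting has reached the
 * hardware before any interrupt is expected.
 */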

static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir);

/* For display hotplug interrupt */
void
ironlake_enable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

void
ironlake_disable_display_irq(struct drm_i915_private *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}
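
/*
 * Worked example (illustrative values): with interrupt_mask = 0x3 and
 * enabled_irq_mask = 0x1, bit 0 is cleared in gt_irq_mask (unmasked, so
 * enabled) and bit 1 is set (masked off); bits outside interrupt_mask
 * keep their previous state. A set bit in an IMR register masks that
 * interrupt.
 */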

void gen5_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void gen5_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

static u32 gen6_pm_iir(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IIR(2) : GEN6_PMIIR;
}

static u32 gen6_pm_imr(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IMR(2) : GEN6_PMIMR;
}

static u32 gen6_pm_ier(struct drm_i915_private *dev_priv)
{
	return INTEL_INFO(dev_priv)->gen >= 8 ? GEN8_GT_IER(2) : GEN6_PMIER;
}

/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(gen6_pm_imr(dev_priv), dev_priv->pm_irq_mask);
		POSTING_READ(gen6_pm_imr(dev_priv));
	}
}

void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	snb_update_pm_irq(dev_priv, mask, mask);
}

static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
				  uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	__gen6_disable_pm_irq(dev_priv, mask);
}

void gen6_reset_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t reg = gen6_pm_iir(dev_priv);

	spin_lock_irq(&dev_priv->irq_lock);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	I915_WRITE(reg, dev_priv->pm_rps_events);
	POSTING_READ(reg);
	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_enable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);

	WARN_ON(dev_priv->rps.pm_iir);
	WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
	dev_priv->rps.interrupts_enabled = true;
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
		   dev_priv->pm_rps_events);
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);

	spin_unlock_irq(&dev_priv->irq_lock);
}

void gen6_disable_rps_interrupts(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	spin_lock_irq(&dev_priv->irq_lock);
	dev_priv->rps.interrupts_enabled = false;
	spin_unlock_irq(&dev_priv->irq_lock);

	cancel_work_sync(&dev_priv->rps.work);

	spin_lock_irq(&dev_priv->irq_lock);

	I915_WRITE(GEN6_PMINTRMSK, INTEL_INFO(dev_priv)->gen >= 8 ?
		   ~GEN8_PMINTR_REDIRECT_TO_NON_DISP : ~0);

	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
		   ~dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);
	I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);

	dev_priv->rps.pm_iir = 0;

	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
				  uint32_t interrupt_mask,
				  uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	WARN_ON(enabled_irq_mask & ~interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
		return;

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}

static void
__i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		       u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == enable_mask)
		return;

	dev_priv->pipestat_irq_mask[pipe] |= status_mask;

	/* Enable the interrupt, clear any pending status */
	pipestat |= enable_mask | status_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}
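
/*
 * Note (illustrative): PIPESTAT packs the enable bits in the high 16 bits
 * and the matching status bits in the low 16, and writing 1 to a status
 * bit clears it. Writing enable_mask | status_mask therefore arms the
 * interrupt and acks any stale event in a single register write.
 */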

static void
__i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
			u32 enable_mask, u32 status_mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & PIPESTAT_INT_ENABLE_MASK;

	assert_spin_locked(&dev_priv->irq_lock);
	WARN_ON(!intel_irqs_enabled(dev_priv));

	if (WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
		      status_mask & ~PIPESTAT_INT_STATUS_MASK,
		      "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
		      pipe_name(pipe), enable_mask, status_mask))
		return;

	if ((pipestat & enable_mask) == 0)
		return;

	dev_priv->pipestat_irq_mask[pipe] &= ~status_mask;

	pipestat &= ~enable_mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

static u32 vlv_get_pipestat_enable_mask(struct drm_device *dev, u32 status_mask)
{
	u32 enable_mask = status_mask << 16;

	/*
	 * On pipe A we don't support the PSR interrupt yet,
	 * on pipe B and C the same bit MBZ.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
		return 0;
	/*
	 * On pipe B and C we don't support the PSR interrupt yet, on pipe
	 * A the same bit is for perf counters which we don't use either.
	 */
	if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
		return 0;

	enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
			 SPRITE0_FLIP_DONE_INT_EN_VLV |
			 SPRITE1_FLIP_DONE_INT_EN_VLV);
	if (status_mask & SPRITE0_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE0_FLIP_DONE_INT_EN_VLV;
	if (status_mask & SPRITE1_FLIP_DONE_INT_STATUS_VLV)
		enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;

	return enable_mask;
}
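
/*
 * Example (illustrative): for status_mask = SPRITE0_FLIP_DONE_INT_STATUS_VLV
 * the generic status_mask << 16 would land on a bit with a different meaning
 * on VLV, so the function strips it from the shifted value and sets
 * SPRITE0_FLIP_DONE_INT_EN_VLV instead.
 */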

void
i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		     u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_enable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

void
i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
		      u32 status_mask)
{
	u32 enable_mask;

	if (IS_VALLEYVIEW(dev_priv->dev))
		enable_mask = vlv_get_pipestat_enable_mask(dev_priv->dev,
							   status_mask);
	else
		enable_mask = status_mask << 16;
	__i915_disable_pipestat(dev_priv, pipe, enable_mask, status_mask);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irq(&dev_priv->irq_lock);

	i915_enable_pipestat(dev_priv, PIPE_B, PIPE_LEGACY_BLC_EVENT_STATUS);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, PIPE_A,
				     PIPE_LEGACY_BLC_EVENT_STATUS);

	spin_unlock_irq(&dev_priv->irq_lock);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}

/*
 * This timing diagram depicts the video signal in and
 * around the vertical blanking period.
 *
 * Assumptions about the fictitious mode used in this example:
 *  vblank_start >= 3
 *  vsync_start = vblank_start + 1
 *  vsync_end = vblank_start + 2
 *  vtotal = vblank_start + 3
 *
 *           start of vblank:
 *           latch double buffered registers
 *           increment frame counter (ctg+)
 *           generate start of vblank interrupt (gen4+)
 *           |
 *           |          frame start:
 *           |          generate frame start interrupt (aka. vblank interrupt) (gmch)
 *           |          may be shifted forward 1-3 extra lines via PIPECONF
 *           |          |
 *           |          |  start of vsync:
 *           |          |  generate vsync interrupt
 *           |          |  |
 * ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx___    ___xxxx
 *       . \hs/ .      \hs/          \hs/          \hs/ .      \hs/
 * ----va---> <-----------------vb--------------------> <--------va-------------
 *       |          |       <----vs----->                     |
 * -vbs-----> <---vbs+1---> <---vbs+2---> <-----0-----> <-----1-----> <-----2--- (scanline counter gen2)
 * -vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2---> <-----0--- (scanline counter gen3+)
 * -vbs-2---> <---vbs-2---> <---vbs-1---> <---vbs-----> <---vbs+1---> <---vbs+2- (scanline counter hsw+ hdmi)
 *       |          |                                         |
 *       last visible pixel                                   first visible pixel
 *                  |                                         increment frame counter (gen3/4)
 *                  pixel counter = vblank_start * htotal     pixel counter = 0 (gen3/4)
 *
 * x  = horizontal active
 * _  = horizontal blanking
 * hs = horizontal sync
 * va = vertical active
 * vb = vertical blanking
 * vs = vertical sync
 * vbs = vblank_start (number)
 *
 * Summary:
 * - most events happen at the start of horizontal sync
 * - frame start happens at the start of horizontal blank, 1-4 lines
 *   (depending on PIPECONF settings) after the start of vblank
 * - gen3/4 pixel and frame counter are synchronized with the start
 *   of horizontal active on the first line of vertical active
 */

static u32 i8xx_get_vblank_counter(struct drm_device *dev, int pipe)
{
	/* Gen2 doesn't have a hardware frame counter */
	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, pixel, vbl_start, hsync_start, htotal;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		struct intel_crtc *intel_crtc =
			to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		const struct drm_display_mode *mode =
			&intel_crtc->config.adjusted_mode;

		htotal = mode->crtc_htotal;
		hsync_start = mode->crtc_hsync_start;
		vbl_start = mode->crtc_vblank_start;
		if (mode->flags & DRM_MODE_FLAG_INTERLACE)
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
	} else {
		enum transcoder cpu_transcoder = (enum transcoder) pipe;

		htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
		hsync_start = (I915_READ(HSYNC(cpu_transcoder)) & 0x1fff) + 1;
		vbl_start = (I915_READ(VBLANK(cpu_transcoder)) & 0x1fff) + 1;
		if ((I915_READ(PIPECONF(cpu_transcoder)) &
		     PIPECONF_INTERLACE_MASK) != PIPECONF_PROGRESSIVE)
			vbl_start = DIV_ROUND_UP(vbl_start, 2);
	}

	/* Convert to pixel count */
	vbl_start *= htotal;

	/* Start of vblank event occurs at start of hsync */
	vbl_start -= htotal - hsync_start;

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low   = I915_READ(low_frame);
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	pixel = low & PIPE_PIXEL_MASK;
	low >>= PIPE_FRAME_LOW_SHIFT;

	/*
	 * The frame counter increments at beginning of active.
	 * Cook up a vblank counter by also checking the pixel
	 * counter against vblank start.
	 */
	return (((high1 << 8) | low) + (pixel >= vbl_start)) & 0xffffff;
}
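
/*
 * Worked example (illustrative numbers): with htotal = 800 and
 * vbl_start = 480 lines, vbl_start is roughly 480 * 800 = 384000 pixels
 * (less the hsync adjustment). A pixel counter sample of 390000 lies past
 * that point, so 1 is added to ((high1 << 8) | low): the frame counter
 * only ticks at start of active, but the vblank for the next frame has
 * already begun.
 */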

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))

static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const struct drm_display_mode *mode = &crtc->config.adjusted_mode;
	enum pipe pipe = crtc->pipe;
	int position, vtotal;

	vtotal = mode->crtc_vtotal;
	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
		vtotal /= 2;

	if (IS_GEN2(dev))
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
	else
		position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;

	/*
	 * See update_scanline_offset() for the details on the
	 * scanline_offset adjustment.
	 */
	return (position + crtc->scanline_offset) % vtotal;
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    unsigned int flags, int *vpos, int *hpos,
				    ktime_t *stime, ktime_t *etime)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const struct drm_display_mode *mode = &intel_crtc->config.adjusted_mode;
	int position;
	int vbl_start, vbl_end, hsync_start, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	unsigned long irqflags;

	if (!intel_crtc->active) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	htotal = mode->crtc_htotal;
	hsync_start = mode->crtc_hsync_start;
	vtotal = mode->crtc_vtotal;
	vbl_start = mode->crtc_vblank_start;
	vbl_end = mode->crtc_vblank_end;

	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
		vbl_start = DIV_ROUND_UP(vbl_start, 2);
		vbl_end /= 2;
		vtotal /= 2;
	}

	ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/*
	 * Lock uncore.lock, as we will do multiple timing critical raw
	 * register reads, potentially with preemption disabled, so the
	 * following code must not block on uncore.lock.
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */

	/* Get optional system timestamp before query. */
	if (stime)
		*stime = ktime_get();

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = __intel_get_crtc_scanline(intel_crtc);
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (__raw_i915_read32(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		/* convert to pixel counts */
		vbl_start *= htotal;
		vbl_end *= htotal;
		vtotal *= htotal;

		/*
		 * In interlaced modes, the pixel counter counts all pixels,
		 * so one field will have htotal more pixels. In order to avoid
		 * the reported position from jumping backwards when the pixel
		 * counter is beyond the length of the shorter field, just
		 * clamp the position to the length of the shorter field. This
		 * matches how the scanline counter based position works since
		 * the scanline counter doesn't count the two half lines.
		 */
		if (position >= vtotal)
			position = vtotal - 1;

		/*
		 * Start of vblank interrupt is triggered at start of hsync,
		 * just prior to the first active line of vblank. However we
		 * consider lines to start at the leading edge of horizontal
		 * active. So, should we get here before we've crossed into
		 * the horizontal active of the first line in vblank, we would
		 * not set the DRM_SCANOUTPOS_INVBL flag. In order to fix that,
		 * always add htotal-hsync_start to the current pixel position.
		 */
		position = (position + htotal - hsync_start) % vtotal;
	}

	/* Get optional system timestamp after query. */
	if (etime)
		*etime = ktime_get();

	/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	in_vbl = position >= vbl_start && position < vbl_end;

	/*
	 * While in vblank, position will be negative
	 * counting up towards 0 at vbl_end. And outside
	 * vblank, position will be positive counting
	 * up since vbl_end.
	 */
	if (position >= vbl_start)
		position -= vbl_end;
	else
		position += vtotal - vbl_end;

	if (IS_GEN2(dev) || IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
		*vpos = position;
		*hpos = 0;
	} else {
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_IN_VBLANK;

	return ret;
}
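
/*
 * Worked example (illustrative numbers): with vbl_start = 480 and
 * vbl_end = vtotal = 500, a raw position of 490 (inside vblank) becomes
 * 490 - 500 = -10, counting up to 0 at vbl_end; a raw position of 10
 * (in active) stays 10, counting up from vbl_end.
 */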

int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	unsigned long irqflags;
	int position;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	position = __intel_get_crtc_scanline(crtc);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

	return position;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc,
						     &to_intel_crtc(crtc)->config.adjusted_mode);
}

static bool intel_hpd_irq_event(struct drm_device *dev,
				struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	if (old_status == connector->status)
		return false;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
		      connector->base.id,
		      connector->name,
		      drm_get_connector_status_name(old_status),
		      drm_get_connector_status_name(connector->status));

	return true;
}

static void i915_digport_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, dig_port_work);
	u32 long_port_mask, short_port_mask;
	struct intel_digital_port *intel_dig_port;
	int i, ret;
	u32 old_bits = 0;

	spin_lock_irq(&dev_priv->irq_lock);
	long_port_mask = dev_priv->long_hpd_port_mask;
	dev_priv->long_hpd_port_mask = 0;
	short_port_mask = dev_priv->short_hpd_port_mask;
	dev_priv->short_hpd_port_mask = 0;
	spin_unlock_irq(&dev_priv->irq_lock);

	for (i = 0; i < I915_MAX_PORTS; i++) {
		bool valid = false;
		bool long_hpd = false;
		intel_dig_port = dev_priv->hpd_irq_port[i];
		if (!intel_dig_port || !intel_dig_port->hpd_pulse)
			continue;

		if (long_port_mask & (1 << i)) {
			valid = true;
			long_hpd = true;
		} else if (short_port_mask & (1 << i))
			valid = true;

		if (valid) {
			ret = intel_dig_port->hpd_pulse(intel_dig_port, long_hpd);
			if (ret == true) {
				/* if we get true, fall back to old-school hpd */
				old_bits |= (1 << intel_dig_port->base.hpd_pin);
			}
		}
	}

	if (old_bits) {
		spin_lock_irq(&dev_priv->irq_lock);
		dev_priv->hpd_event_bits |= old_bits;
		spin_unlock_irq(&dev_priv->irq_lock);
		schedule_work(&dev_priv->hotplug_work);
	}
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irq(&dev_priv->irq_lock);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		if (!intel_connector->encoder)
			continue;
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 connector->name);
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      connector->name, intel_encoder->hpd_pin);
		}
	}
	/* If there were no outputs to poll, polling was disabled, so make
	 * sure it's enabled when we disable HPD on some connectors. */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_delayed_work(system_wq, &dev_priv->hotplug_reenable_work,
				 msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irq(&dev_priv->irq_lock);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		if (!intel_connector->encoder)
			continue;
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}

static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}
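
/*
 * Note on the comparisons above (reading from the code, not the original
 * comments): on Ironlake a smaller delay value means a higher frequency,
 * so ips.max_delay is numerically the lower bound and ips.min_delay the
 * upper one.
 */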

static void notify_ring(struct drm_device *dev,
			struct intel_engine_cs *ring)
{
	if (!intel_ring_initialized(ring))
		return;

	trace_i915_gem_request_notify(ring);

	wake_up_all(&ring->irq_queue);
}

static u32 vlv_c0_residency(struct drm_i915_private *dev_priv,
			    struct intel_rps_ei *rps_ei)
{
	u32 cz_ts, cz_freq_khz;
	u32 render_count, media_count;
	u32 elapsed_render, elapsed_media, elapsed_time;
	u32 residency = 0;

	cz_ts = vlv_punit_read(dev_priv, PUNIT_REG_CZ_TIMESTAMP);
	cz_freq_khz = DIV_ROUND_CLOSEST(dev_priv->mem_freq * 1000, 4);

	render_count = I915_READ(VLV_RENDER_C0_COUNT_REG);
	media_count = I915_READ(VLV_MEDIA_C0_COUNT_REG);

	if (rps_ei->cz_clock == 0) {
		rps_ei->cz_clock = cz_ts;
		rps_ei->render_c0 = render_count;
		rps_ei->media_c0 = media_count;

		return dev_priv->rps.cur_freq;
	}

	elapsed_time = cz_ts - rps_ei->cz_clock;
	rps_ei->cz_clock = cz_ts;

	elapsed_render = render_count - rps_ei->render_c0;
	rps_ei->render_c0 = render_count;

	elapsed_media = media_count - rps_ei->media_c0;
	rps_ei->media_c0 = media_count;

	/* Convert all the counters into a common unit (milliseconds) */
	elapsed_time /= VLV_CZ_CLOCK_TO_MILLI_SEC;
	elapsed_render /= cz_freq_khz;
	elapsed_media /= cz_freq_khz;

	/*
	 * Calculate the overall C0 residency percentage
	 * only if the elapsed time is non-zero
	 */
	if (elapsed_time) {
		residency =
			((max(elapsed_render, elapsed_media) * 100)
			 / elapsed_time);
	}

	return residency;
}
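
/*
 * Example (illustrative numbers): if the render well spent 40 ms in C0
 * and the media well 10 ms over a 50 ms evaluation interval, residency
 * works out to max(40, 10) * 100 / 50 = 80%.
 */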

/**
 * vlv_calc_delay_from_C0_counters - Increase/Decrease freq based on GPU
 * busy-ness calculated from C0 counters of render & media power wells
 * @dev_priv: DRM device private
 */
static int vlv_calc_delay_from_C0_counters(struct drm_i915_private *dev_priv)
{
	u32 residency_C0_up = 0, residency_C0_down = 0;
	int new_delay, adj;

	dev_priv->rps.ei_interrupt_count++;

	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));

	if (dev_priv->rps.up_ei.cz_clock == 0) {
		vlv_c0_residency(dev_priv, &dev_priv->rps.up_ei);
		vlv_c0_residency(dev_priv, &dev_priv->rps.down_ei);
		return dev_priv->rps.cur_freq;
	}

	/*
	 * To down-throttle, C0 residency should be less than the down
	 * threshold for continuous EI intervals. So calculate the down EI
	 * counters once in VLV_INT_COUNT_FOR_DOWN_EI
	 */
	if (dev_priv->rps.ei_interrupt_count == VLV_INT_COUNT_FOR_DOWN_EI) {
		dev_priv->rps.ei_interrupt_count = 0;

		residency_C0_down = vlv_c0_residency(dev_priv,
						     &dev_priv->rps.down_ei);
	} else {
		residency_C0_up = vlv_c0_residency(dev_priv,
						   &dev_priv->rps.up_ei);
	}

	new_delay = dev_priv->rps.cur_freq;

	adj = dev_priv->rps.last_adj;
	/* C0 residency is greater than UP threshold. Increase Frequency */
	if (residency_C0_up >= VLV_RP_UP_EI_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else
			adj = 1;

		if (dev_priv->rps.cur_freq < dev_priv->rps.max_freq_softlimit)
			new_delay = dev_priv->rps.cur_freq + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;

	} else if (!dev_priv->rps.ei_interrupt_count &&
		   (residency_C0_down < VLV_RP_DOWN_EI_THRESHOLD)) {
		if (adj < 0)
			adj *= 2;
		else
			adj = -1;
		/*
		 * This means C0 residency is less than the down threshold
		 * over a period of VLV_INT_COUNT_FOR_DOWN_EI, so reduce the
		 * frequency.
		 */
		if (dev_priv->rps.cur_freq > dev_priv->rps.min_freq_softlimit)
			new_delay = dev_priv->rps.cur_freq + adj;
	}

	return new_delay;
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, rps.work);
	u32 pm_iir;
	int new_delay, adj;

	spin_lock_irq(&dev_priv->irq_lock);
	/* Speed up work cancellation while disabling rps interrupts. */
	if (!dev_priv->rps.interrupts_enabled) {
		spin_unlock_irq(&dev_priv->irq_lock);
		return;
	}
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */
	gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~dev_priv->pm_rps_events);

	if ((pm_iir & dev_priv->pm_rps_events) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	adj = dev_priv->rps.last_adj;
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else {
			/* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv->dev) ? 2 : 1;
		}
		new_delay = dev_priv->rps.cur_freq + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_freq > dev_priv->rps.efficient_freq)
			new_delay = dev_priv->rps.efficient_freq;
		else
			new_delay = dev_priv->rps.min_freq_softlimit;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) {
		new_delay = vlv_calc_delay_from_C0_counters(dev_priv);
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else {
			/* CHV needs even encode values */
			adj = IS_CHERRYVIEW(dev_priv->dev) ? -2 : -1;
		}
		new_delay = dev_priv->rps.cur_freq + adj;
	} else { /* unknown event */
		new_delay = dev_priv->rps.cur_freq;
	}

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	new_delay = clamp_t(int, new_delay,
			    dev_priv->rps.min_freq_softlimit,
			    dev_priv->rps.max_freq_softlimit);

	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_freq;

	if (IS_VALLEYVIEW(dev_priv->dev))
		valleyview_set_rps(dev_priv->dev, new_delay);
	else
		gen6_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}

/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows, since statistically
 * the same row is likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, struct drm_i915_private, l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		u32 reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1 + (slice * 0x200);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->dev->primary->kdev->kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irq(&dev_priv->irq_lock);
	gen5_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	spin_unlock_irq(&dev_priv->irq_lock);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}

static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!HAS_L3_DPF(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	gen5_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);

	if (gt_iir & GT_PARITY_ERROR(dev))
		ivybridge_parity_error_irq_handler(dev, gt_iir);
}
1371 | ||
abd58f01 BW |
1372 | static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev, |
1373 | struct drm_i915_private *dev_priv, | |
1374 | u32 master_ctl) | |
1375 | { | |
e981e7b1 | 1376 | struct intel_engine_cs *ring; |
abd58f01 BW |
1377 | u32 rcs, bcs, vcs; |
1378 | uint32_t tmp = 0; | |
1379 | irqreturn_t ret = IRQ_NONE; | |
1380 | ||
1381 | if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { | |
1382 | tmp = I915_READ(GEN8_GT_IIR(0)); | |
1383 | if (tmp) { | |
38cc46d7 | 1384 | I915_WRITE(GEN8_GT_IIR(0), tmp); |
abd58f01 | 1385 | ret = IRQ_HANDLED; |
e981e7b1 | 1386 | |
abd58f01 | 1387 | rcs = tmp >> GEN8_RCS_IRQ_SHIFT; |
e981e7b1 | 1388 | ring = &dev_priv->ring[RCS]; |
abd58f01 | 1389 | if (rcs & GT_RENDER_USER_INTERRUPT) |
e981e7b1 TD |
1390 | notify_ring(dev, ring); |
1391 | if (rcs & GT_CONTEXT_SWITCH_INTERRUPT) | |
3f7531c3 | 1392 | intel_lrc_irq_handler(ring); |
e981e7b1 TD |
1393 | |
1394 | bcs = tmp >> GEN8_BCS_IRQ_SHIFT; | |
1395 | ring = &dev_priv->ring[BCS]; | |
abd58f01 | 1396 | if (bcs & GT_RENDER_USER_INTERRUPT) |
e981e7b1 TD |
1397 | notify_ring(dev, ring); |
1398 | if (bcs & GT_CONTEXT_SWITCH_INTERRUPT) | |
3f7531c3 | 1399 | intel_lrc_irq_handler(ring); |
abd58f01 BW |
1400 | } else |
1401 | DRM_ERROR("The master control interrupt lied (GT0)!\n"); | |
1402 | } | |
1403 | ||
85f9b5f9 | 1404 | if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { |
abd58f01 BW |
1405 | tmp = I915_READ(GEN8_GT_IIR(1)); |
1406 | if (tmp) { | |
38cc46d7 | 1407 | I915_WRITE(GEN8_GT_IIR(1), tmp); |
abd58f01 | 1408 | ret = IRQ_HANDLED; |
e981e7b1 | 1409 | |
abd58f01 | 1410 | vcs = tmp >> GEN8_VCS1_IRQ_SHIFT; |
e981e7b1 | 1411 | ring = &dev_priv->ring[VCS]; |
abd58f01 | 1412 | if (vcs & GT_RENDER_USER_INTERRUPT) |
e981e7b1 | 1413 | notify_ring(dev, ring); |
73d477f6 | 1414 | if (vcs & GT_CONTEXT_SWITCH_INTERRUPT) |
3f7531c3 | 1415 | intel_lrc_irq_handler(ring); |
e981e7b1 | 1416 | |
85f9b5f9 | 1417 | vcs = tmp >> GEN8_VCS2_IRQ_SHIFT; |
e981e7b1 | 1418 | ring = &dev_priv->ring[VCS2]; |
85f9b5f9 | 1419 | if (vcs & GT_RENDER_USER_INTERRUPT) |
e981e7b1 | 1420 | notify_ring(dev, ring); |
73d477f6 | 1421 | if (vcs & GT_CONTEXT_SWITCH_INTERRUPT) |
3f7531c3 | 1422 | intel_lrc_irq_handler(ring); |
abd58f01 BW |
1423 | } else |
1424 | DRM_ERROR("The master control interrupt lied (GT1)!\n"); | |
1425 | } | |
1426 | ||
0961021a BW |
1427 | if (master_ctl & GEN8_GT_PM_IRQ) { |
1428 | tmp = I915_READ(GEN8_GT_IIR(2)); | |
1429 | if (tmp & dev_priv->pm_rps_events) { | |
0961021a BW |
1430 | I915_WRITE(GEN8_GT_IIR(2), |
1431 | tmp & dev_priv->pm_rps_events); | |
38cc46d7 | 1432 | ret = IRQ_HANDLED; |
c9a9a268 | 1433 | gen6_rps_irq_handler(dev_priv, tmp); |
0961021a BW |
1434 | } else |
1435 | DRM_ERROR("The master control interrupt lied (PM)!\n"); | |
1436 | } | |
1437 | ||
abd58f01 BW |
1438 | if (master_ctl & GEN8_GT_VECS_IRQ) { |
1439 | tmp = I915_READ(GEN8_GT_IIR(3)); | |
1440 | if (tmp) { | |
38cc46d7 | 1441 | I915_WRITE(GEN8_GT_IIR(3), tmp); |
abd58f01 | 1442 | ret = IRQ_HANDLED; |
e981e7b1 | 1443 | |
abd58f01 | 1444 | vcs = tmp >> GEN8_VECS_IRQ_SHIFT; |
e981e7b1 | 1445 | ring = &dev_priv->ring[VECS]; |
abd58f01 | 1446 | if (vcs & GT_RENDER_USER_INTERRUPT) |
e981e7b1 | 1447 | notify_ring(dev, ring); |
73d477f6 | 1448 | if (vcs & GT_CONTEXT_SWITCH_INTERRUPT) |
3f7531c3 | 1449 | intel_lrc_irq_handler(ring); |
abd58f01 BW |
1450 | } else |
1451 | DRM_ERROR("The master control interrupt lied (GT3)!\n"); | |
1452 | } | |
1453 | ||
1454 | return ret; | |
1455 | } | |
1456 | ||
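gen8_gt_irq_handler packs two engines into each 32-bit IIR bank and separates them by shifting (rcs = tmp >> GEN8_RCS_IRQ_SHIFT, bcs = tmp >> GEN8_BCS_IRQ_SHIFT). A compilable model of that banked decode, with invented shift and bit values standing in for the GEN8_* constants:

/* Model of decoding one GT IIR bank that carries two engines.
 * Shift values are illustrative, not the real GEN8_*_IRQ_SHIFT. */
#include <stdint.h>
#include <stdio.h>

#define RCS_SHIFT 0    /* render engine occupies the low half   */
#define BCS_SHIFT 16   /* blitter engine occupies the high half */
#define USER_INTERRUPT       (1u << 0)
#define CTX_SWITCH_INTERRUPT (1u << 8)

static void handle_bank(uint32_t iir)
{
	uint32_t rcs = iir >> RCS_SHIFT;
	uint32_t bcs = iir >> BCS_SHIFT;

	if (rcs & USER_INTERRUPT)
		puts("notify render ring");
	if (rcs & CTX_SWITCH_INTERRUPT)
		puts("render context switch");
	if (bcs & USER_INTERRUPT)
		puts("notify blitter ring");
	if (bcs & CTX_SWITCH_INTERRUPT)
		puts("blitter context switch");
}

int main(void)
{
	/* user interrupt on render, context switch on blitter */
	handle_bank((USER_INTERRUPT << RCS_SHIFT) |
		    (CTX_SWITCH_INTERRUPT << BCS_SHIFT));
	return 0;
}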
b543fb04 EE |
1457 | #define HPD_STORM_DETECT_PERIOD 1000 |
1458 | #define HPD_STORM_THRESHOLD 5 | |
1459 | ||
07c338ce | 1460 | static int pch_port_to_hotplug_shift(enum port port) |
13cf5504 DA |
1461 | { |
1462 | switch (port) { | |
1463 | case PORT_A: | |
1464 | case PORT_E: | |
1465 | default: | |
1466 | return -1; | |
1467 | case PORT_B: | |
1468 | return 0; | |
1469 | case PORT_C: | |
1470 | return 8; | |
1471 | case PORT_D: | |
1472 | return 16; | |
1473 | } | |
1474 | } | |
1475 | ||
07c338ce | 1476 | static int i915_port_to_hotplug_shift(enum port port) |
13cf5504 DA |
1477 | { |
1478 | switch (port) { | |
1479 | case PORT_A: | |
1480 | case PORT_E: | |
1481 | default: | |
1482 | return -1; | |
1483 | case PORT_B: | |
1484 | return 17; | |
1485 | case PORT_C: | |
1486 | return 19; | |
1487 | case PORT_D: | |
1488 | return 21; | |
1489 | } | |
1490 | } | |
1491 | ||
1492 | static inline enum port get_port_from_pin(enum hpd_pin pin) | |
1493 | { | |
1494 | switch (pin) { | |
1495 | case HPD_PORT_B: | |
1496 | return PORT_B; | |
1497 | case HPD_PORT_C: | |
1498 | return PORT_C; | |
1499 | case HPD_PORT_D: | |
1500 | return PORT_D; | |
1501 | default: | |
1502 | return PORT_A; /* no hpd */ | |
1503 | } | |
1504 | } | |
1505 | ||
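The helpers above reduce long/short pulse classification to a shift and a mask: the per-port shift locates that port's status field in the hotplug register, and the LONG_DETECT bit inside the field distinguishes the pulse types. A hedged sketch of the same decode with an invented register layout:

/* Model of per-port long/short hotplug decode. The layout (8-bit
 * fields per port, bit 1 = "long pulse") is invented for illustration. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum port { PORT_B, PORT_C, PORT_D };

static int port_to_shift(enum port port)
{
	switch (port) {
	case PORT_B: return 0;
	case PORT_C: return 8;
	case PORT_D: return 16;
	}
	return -1;
}

#define HOTPLUG_LONG_DETECT (1u << 1)

static bool is_long_pulse(uint32_t dig_hotplug_reg, enum port port)
{
	return (dig_hotplug_reg >> port_to_shift(port)) & HOTPLUG_LONG_DETECT;
}

int main(void)
{
	uint32_t reg = HOTPLUG_LONG_DETECT << 8; /* long pulse on port C */
	printf("port C: %s\n", is_long_pulse(reg, PORT_C) ? "long" : "short");
	printf("port B: %s\n", is_long_pulse(reg, PORT_B) ? "long" : "short");
	return 0;
}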
10a504de | 1506 | static inline void intel_hpd_irq_handler(struct drm_device *dev, |
22062dba | 1507 | u32 hotplug_trigger, |
13cf5504 | 1508 | u32 dig_hotplug_reg, |
22062dba | 1509 | const u32 *hpd) |
b543fb04 | 1510 | { |
2d1013dd | 1511 | struct drm_i915_private *dev_priv = dev->dev_private; |
b543fb04 | 1512 | int i; |
13cf5504 | 1513 | enum port port; |
10a504de | 1514 | bool storm_detected = false; |
13cf5504 DA |
1515 | bool queue_dig = false, queue_hp = false; |
1516 | u32 dig_shift; | |
1517 | u32 dig_port_mask = 0; | |
b543fb04 | 1518 | |
91d131d2 DV |
1519 | if (!hotplug_trigger) |
1520 | return; | |
1521 | ||
13cf5504 DA |
1522 | DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x\n", |
1523 | hotplug_trigger, dig_hotplug_reg); | |
cc9bd499 | 1524 | |
b5ea2d56 | 1525 | spin_lock(&dev_priv->irq_lock); |
b543fb04 | 1526 | for (i = 1; i < HPD_NUM_PINS; i++) { |
13cf5504 DA |
1527 | if (!(hpd[i] & hotplug_trigger)) |
1528 | continue; | |
1529 | ||
1530 | port = get_port_from_pin(i); | |
1531 | if (port && dev_priv->hpd_irq_port[port]) { | |
1532 | bool long_hpd; | |
1533 | ||
07c338ce JN |
1534 | if (HAS_PCH_SPLIT(dev)) { |
1535 | dig_shift = pch_port_to_hotplug_shift(port); | |
13cf5504 | 1536 | long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT; |
07c338ce JN |
1537 | } else { |
1538 | dig_shift = i915_port_to_hotplug_shift(port); | |
1539 | long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT; | |
13cf5504 DA |
1540 | } |
1541 | ||
26fbb774 VS |
1542 | DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", |
1543 | port_name(port), | |
1544 | long_hpd ? "long" : "short"); | |
13cf5504 DA |
1545 | /* for long HPD pulses we want to have the digital queue happen, |
1546 | but we still want HPD storm detection to function. */ | |
1547 | if (long_hpd) { | |
1548 | dev_priv->long_hpd_port_mask |= (1 << port); | |
1549 | dig_port_mask |= hpd[i]; | |
1550 | } else { | |
1551 | /* for short HPD just trigger the digital queue */ | |
1552 | dev_priv->short_hpd_port_mask |= (1 << port); | |
1553 | hotplug_trigger &= ~hpd[i]; | |
1554 | } | |
1555 | queue_dig = true; | |
1556 | } | |
1557 | } | |
821450c6 | 1558 | |
13cf5504 | 1559 | for (i = 1; i < HPD_NUM_PINS; i++) { |
3ff04a16 DV |
1560 | if (hpd[i] & hotplug_trigger && |
1561 | dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) { | |
1562 | /* | |
1563 | * On GMCH platforms the interrupt mask bits only | |
1564 | * prevent irq generation, not the setting of the | |
1565 | * hotplug bits themselves. So only WARN about unexpected |
1566 | * interrupts on saner platforms. | |
1567 | */ | |
1568 | WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev), | |
1569 | "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n", | |
1570 | hotplug_trigger, i, hpd[i]); | |
1571 | ||
1572 | continue; | |
1573 | } | |
b8f102e8 | 1574 | |
b543fb04 EE |
1575 | if (!(hpd[i] & hotplug_trigger) || |
1576 | dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED) | |
1577 | continue; | |
1578 | ||
13cf5504 DA |
1579 | if (!(dig_port_mask & hpd[i])) { |
1580 | dev_priv->hpd_event_bits |= (1 << i); | |
1581 | queue_hp = true; | |
1582 | } | |
1583 | ||
b543fb04 EE |
1584 | if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies, |
1585 | dev_priv->hpd_stats[i].hpd_last_jiffies | |
1586 | + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) { | |
1587 | dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies; | |
1588 | dev_priv->hpd_stats[i].hpd_cnt = 0; | |
b8f102e8 | 1589 | DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i); |
b543fb04 EE |
1590 | } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) { |
1591 | dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED; | |
142e2398 | 1592 | dev_priv->hpd_event_bits &= ~(1 << i); |
b543fb04 | 1593 | DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i); |
10a504de | 1594 | storm_detected = true; |
b543fb04 EE |
1595 | } else { |
1596 | dev_priv->hpd_stats[i].hpd_cnt++; | |
b8f102e8 EE |
1597 | DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i, |
1598 | dev_priv->hpd_stats[i].hpd_cnt); | |
b543fb04 EE |
1599 | } |
1600 | } | |
1601 | ||
10a504de DV |
1602 | if (storm_detected) |
1603 | dev_priv->display.hpd_irq_setup(dev); | |
b5ea2d56 | 1604 | spin_unlock(&dev_priv->irq_lock); |
5876fa0d | 1605 | |
645416f5 DV |
1606 | /* |
1607 | * Our hotplug handler can grab modeset locks (by calling down into the | |
1608 | * fb helpers). Hence it must not be run on our own dev-priv->wq work | |
1609 | * queue for otherwise the flush_work in the pageflip code will | |
1610 | * deadlock. | |
1611 | */ | |
13cf5504 | 1612 | if (queue_dig) |
0e32b39c | 1613 | queue_work(dev_priv->dp_wq, &dev_priv->dig_port_work); |
13cf5504 DA |
1614 | if (queue_hp) |
1615 | schedule_work(&dev_priv->hotplug_work); | |
b543fb04 EE |
1616 | } |
1617 | ||
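The storm detector in intel_hpd_irq_handler counts interrupts per pin inside a rolling HPD_STORM_DETECT_PERIOD window: a count whose window has expired resets to zero, one that passes HPD_STORM_THRESHOLD marks the pin disabled. A standalone model of the window logic, using a plain millisecond counter where the driver uses jiffies and time_in_range():

/* Model of the HPD storm window: same constants as the driver,
 * but time is a caller-supplied millisecond count, not jiffies. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define HPD_STORM_DETECT_PERIOD 1000 /* ms */
#define HPD_STORM_THRESHOLD 5

struct hpd_pin_stats {
	uint64_t last_ms;
	int cnt;
	bool disabled;
};

/* Returns true when this interrupt tips the pin into a storm. */
static bool hpd_storm_detect(struct hpd_pin_stats *s, uint64_t now_ms)
{
	if (now_ms - s->last_ms > HPD_STORM_DETECT_PERIOD) {
		s->last_ms = now_ms;
		s->cnt = 0;            /* window expired: restart count */
	} else if (s->cnt > HPD_STORM_THRESHOLD) {
		s->disabled = true;    /* too many pulses in one window */
		return true;
	} else {
		s->cnt++;
	}
	return false;
}

int main(void)
{
	struct hpd_pin_stats pin = { 0 };

	for (int i = 0; i < 10; i++) {
		if (hpd_storm_detect(&pin, 100 + i)) { /* burst within 10 ms */
			printf("storm detected on interrupt %d\n", i);
			break;
		}
	}
	return 0;
}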
515ac2bb DV |
1618 | static void gmbus_irq_handler(struct drm_device *dev) |
1619 | { | |
2d1013dd | 1620 | struct drm_i915_private *dev_priv = dev->dev_private; |
28c70f16 | 1621 | |
28c70f16 | 1622 | wake_up_all(&dev_priv->gmbus_wait_queue); |
515ac2bb DV |
1623 | } |
1624 | ||
ce99c256 DV |
1625 | static void dp_aux_irq_handler(struct drm_device *dev) |
1626 | { | |
2d1013dd | 1627 | struct drm_i915_private *dev_priv = dev->dev_private; |
9ee32fea | 1628 | |
9ee32fea | 1629 | wake_up_all(&dev_priv->gmbus_wait_queue); |
ce99c256 DV |
1630 | } |
1631 | ||
8bf1e9f1 | 1632 | #if defined(CONFIG_DEBUG_FS) |
277de95e DV |
1633 | static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe, |
1634 | uint32_t crc0, uint32_t crc1, | |
1635 | uint32_t crc2, uint32_t crc3, | |
1636 | uint32_t crc4) | |
8bf1e9f1 SH |
1637 | { |
1638 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1639 | struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe]; | |
1640 | struct intel_pipe_crc_entry *entry; | |
ac2300d4 | 1641 | int head, tail; |
b2c88f5b | 1642 | |
d538bbdf DL |
1643 | spin_lock(&pipe_crc->lock); |
1644 | ||
0c912c79 | 1645 | if (!pipe_crc->entries) { |
d538bbdf | 1646 | spin_unlock(&pipe_crc->lock); |
34273620 | 1647 | DRM_DEBUG_KMS("spurious interrupt\n"); |
0c912c79 DL |
1648 | return; |
1649 | } | |
1650 | ||
d538bbdf DL |
1651 | head = pipe_crc->head; |
1652 | tail = pipe_crc->tail; | |
b2c88f5b DL |
1653 | |
1654 | if (CIRC_SPACE(head, tail, INTEL_PIPE_CRC_ENTRIES_NR) < 1) { | |
d538bbdf | 1655 | spin_unlock(&pipe_crc->lock); |
b2c88f5b DL |
1656 | DRM_ERROR("CRC buffer overflowing\n"); |
1657 | return; | |
1658 | } | |
1659 | ||
1660 | entry = &pipe_crc->entries[head]; | |
8bf1e9f1 | 1661 | |
8bc5e955 | 1662 | entry->frame = dev->driver->get_vblank_counter(dev, pipe); |
eba94eb9 DV |
1663 | entry->crc[0] = crc0; |
1664 | entry->crc[1] = crc1; | |
1665 | entry->crc[2] = crc2; | |
1666 | entry->crc[3] = crc3; | |
1667 | entry->crc[4] = crc4; | |
b2c88f5b DL |
1668 | |
1669 | head = (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1); | |
d538bbdf DL |
1670 | pipe_crc->head = head; |
1671 | ||
1672 | spin_unlock(&pipe_crc->lock); | |
07144428 DL |
1673 | |
1674 | wake_up_interruptible(&pipe_crc->wq); | |
8bf1e9f1 | 1675 | } |
277de95e DV |
1676 | #else |
1677 | static inline void | |
1678 | display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe, | |
1679 | uint32_t crc0, uint32_t crc1, | |
1680 | uint32_t crc2, uint32_t crc3, | |
1681 | uint32_t crc4) {} | |
1682 | #endif | |
1683 | ||
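display_pipe_crc_irq_handler feeds a power-of-two ring buffer guarded by CIRC_SPACE(), advancing head with (head + 1) & (INTEL_PIPE_CRC_ENTRIES_NR - 1). The same arithmetic in a self-contained ring; the size here is illustrative and must remain a power of two:

/* Minimal power-of-two ring with the same head/tail arithmetic. */
#include <stdio.h>

#define ENTRIES_NR 8
#define CIRC_SPACE(head, tail, size) (((tail) - ((head) + 1)) & ((size) - 1))

static unsigned int ring[ENTRIES_NR];
static int head, tail;

static int push(unsigned int v)
{
	if (CIRC_SPACE(head, tail, ENTRIES_NR) < 1)
		return -1;                     /* buffer overflowing */
	ring[head] = v;
	head = (head + 1) & (ENTRIES_NR - 1);  /* cheap modulo wrap */
	return 0;
}

int main(void)
{
	for (unsigned int i = 0; i < 10; i++)
		if (push(i))
			printf("dropped entry %u\n", i);
	printf("head=%d tail=%d\n", head, tail);
	return 0;
}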
eba94eb9 | 1684 | |
277de95e | 1685 | static void hsw_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) |
5a69b89f DV |
1686 | { |
1687 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1688 | ||
277de95e DV |
1689 | display_pipe_crc_irq_handler(dev, pipe, |
1690 | I915_READ(PIPE_CRC_RES_1_IVB(pipe)), | |
1691 | 0, 0, 0, 0); | |
5a69b89f DV |
1692 | } |
1693 | ||
277de95e | 1694 | static void ivb_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) |
eba94eb9 DV |
1695 | { |
1696 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1697 | ||
277de95e DV |
1698 | display_pipe_crc_irq_handler(dev, pipe, |
1699 | I915_READ(PIPE_CRC_RES_1_IVB(pipe)), | |
1700 | I915_READ(PIPE_CRC_RES_2_IVB(pipe)), | |
1701 | I915_READ(PIPE_CRC_RES_3_IVB(pipe)), | |
1702 | I915_READ(PIPE_CRC_RES_4_IVB(pipe)), | |
1703 | I915_READ(PIPE_CRC_RES_5_IVB(pipe))); | |
eba94eb9 | 1704 | } |
5b3a856b | 1705 | |
277de95e | 1706 | static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) |
5b3a856b DV |
1707 | { |
1708 | struct drm_i915_private *dev_priv = dev->dev_private; | |
0b5c5ed0 DV |
1709 | uint32_t res1, res2; |
1710 | ||
1711 | if (INTEL_INFO(dev)->gen >= 3) | |
1712 | res1 = I915_READ(PIPE_CRC_RES_RES1_I915(pipe)); | |
1713 | else | |
1714 | res1 = 0; | |
1715 | ||
1716 | if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) | |
1717 | res2 = I915_READ(PIPE_CRC_RES_RES2_G4X(pipe)); | |
1718 | else | |
1719 | res2 = 0; | |
5b3a856b | 1720 | |
277de95e DV |
1721 | display_pipe_crc_irq_handler(dev, pipe, |
1722 | I915_READ(PIPE_CRC_RES_RED(pipe)), | |
1723 | I915_READ(PIPE_CRC_RES_GREEN(pipe)), | |
1724 | I915_READ(PIPE_CRC_RES_BLUE(pipe)), | |
1725 | res1, res2); | |
5b3a856b | 1726 | } |
8bf1e9f1 | 1727 | |
1403c0d4 PZ |
1728 | /* The RPS events need forcewake, so we add them to a work queue and mask their |
1729 | * IMR bits until the work is done. Other interrupts can be processed without | |
1730 | * the work queue. */ | |
1731 | static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) | |
baf02a1f | 1732 | { |
4a74de82 ID |
1733 | /* TODO: RPS on GEN9+ is not supported yet. */ |
1734 | if (WARN_ONCE(INTEL_INFO(dev_priv)->gen >= 9, | |
1735 | "GEN9+: unexpected RPS IRQ\n")) | |
132f3f17 ID |
1736 | return; |
1737 | ||
a6706b45 | 1738 | if (pm_iir & dev_priv->pm_rps_events) { |
59cdb63d | 1739 | spin_lock(&dev_priv->irq_lock); |
480c8033 | 1740 | gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); |
d4d70aa5 ID |
1741 | if (dev_priv->rps.interrupts_enabled) { |
1742 | dev_priv->rps.pm_iir |= pm_iir & dev_priv->pm_rps_events; | |
1743 | queue_work(dev_priv->wq, &dev_priv->rps.work); | |
1744 | } | |
59cdb63d | 1745 | spin_unlock(&dev_priv->irq_lock); |
baf02a1f | 1746 | } |
baf02a1f | 1747 | |
c9a9a268 ID |
1748 | if (INTEL_INFO(dev_priv)->gen >= 8) |
1749 | return; | |
1750 | ||
1403c0d4 PZ |
1751 | if (HAS_VEBOX(dev_priv->dev)) { |
1752 | if (pm_iir & PM_VEBOX_USER_INTERRUPT) | |
1753 | notify_ring(dev_priv->dev, &dev_priv->ring[VECS]); | |
12638c57 | 1754 | |
aaecdf61 DV |
1755 | if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) |
1756 | DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir); | |
12638c57 | 1757 | } |
baf02a1f BW |
1758 | } |
1759 | ||
8d7849db VS |
1760 | static bool intel_pipe_handle_vblank(struct drm_device *dev, enum pipe pipe) |
1761 | { | |
8d7849db VS |
1762 | if (!drm_handle_vblank(dev, pipe)) |
1763 | return false; | |
1764 | ||
8d7849db VS |
1765 | return true; |
1766 | } | |
1767 | ||
c1874ed7 ID |
1768 | static void valleyview_pipestat_irq_handler(struct drm_device *dev, u32 iir) |
1769 | { | |
1770 | struct drm_i915_private *dev_priv = dev->dev_private; | |
91d181dd | 1771 | u32 pipe_stats[I915_MAX_PIPES] = { }; |
c1874ed7 ID |
1772 | int pipe; |
1773 | ||
58ead0d7 | 1774 | spin_lock(&dev_priv->irq_lock); |
055e393f | 1775 | for_each_pipe(dev_priv, pipe) { |
91d181dd | 1776 | int reg; |
bbb5eebf | 1777 | u32 mask, iir_bit = 0; |
91d181dd | 1778 | |
bbb5eebf DV |
1779 | /* |
1780 | * PIPESTAT bits get signalled even when the interrupt is | |
1781 | * disabled with the mask bits, and some of the status bits do | |
1782 | * not generate interrupts at all (like the underrun bit). Hence | |
1783 | * we need to be careful that we only handle what we want to | |
1784 | * handle. | |
1785 | */ | |
0f239f4c DV |
1786 | |
1787 | /* fifo underruns are filtered in the underrun handler. */ |
1788 | mask = PIPE_FIFO_UNDERRUN_STATUS; | |
bbb5eebf DV |
1789 | |
1790 | switch (pipe) { | |
1791 | case PIPE_A: | |
1792 | iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; | |
1793 | break; | |
1794 | case PIPE_B: | |
1795 | iir_bit = I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; | |
1796 | break; | |
3278f67f VS |
1797 | case PIPE_C: |
1798 | iir_bit = I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; | |
1799 | break; | |
bbb5eebf DV |
1800 | } |
1801 | if (iir & iir_bit) | |
1802 | mask |= dev_priv->pipestat_irq_mask[pipe]; | |
1803 | ||
1804 | if (!mask) | |
91d181dd ID |
1805 | continue; |
1806 | ||
1807 | reg = PIPESTAT(pipe); | |
bbb5eebf DV |
1808 | mask |= PIPESTAT_INT_ENABLE_MASK; |
1809 | pipe_stats[pipe] = I915_READ(reg) & mask; | |
c1874ed7 ID |
1810 | |
1811 | /* | |
1812 | * Clear the PIPE*STAT regs before the IIR | |
1813 | */ | |
91d181dd ID |
1814 | if (pipe_stats[pipe] & (PIPE_FIFO_UNDERRUN_STATUS | |
1815 | PIPESTAT_INT_STATUS_MASK)) | |
c1874ed7 ID |
1816 | I915_WRITE(reg, pipe_stats[pipe]); |
1817 | } | |
58ead0d7 | 1818 | spin_unlock(&dev_priv->irq_lock); |
c1874ed7 | 1819 | |
055e393f | 1820 | for_each_pipe(dev_priv, pipe) { |
d6bbafa1 CW |
1821 | if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && |
1822 | intel_pipe_handle_vblank(dev, pipe)) | |
1823 | intel_check_page_flip(dev, pipe); | |
c1874ed7 | 1824 | |
579a9b0e | 1825 | if (pipe_stats[pipe] & PLANE_FLIP_DONE_INT_STATUS_VLV) { |
c1874ed7 ID |
1826 | intel_prepare_page_flip(dev, pipe); |
1827 | intel_finish_page_flip(dev, pipe); | |
1828 | } | |
1829 | ||
1830 | if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) | |
1831 | i9xx_pipe_crc_irq_handler(dev, pipe); | |
1832 | ||
1f7247c0 DV |
1833 | if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) |
1834 | intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); | |
c1874ed7 ID |
1835 | } |
1836 | ||
1837 | if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) | |
1838 | gmbus_irq_handler(dev); | |
1839 | } | |
1840 | ||
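PIPESTAT status bits latch even while their interrupts are masked, which is why the handler above builds a mask of only the bits it wants, snapshots status & mask, and writes the snapshot back to acknowledge before acting on it. A compact model of that read-mask-ack sequence over a fake write-1-to-clear register:

/* Model of the PIPESTAT read/mask/ack sequence. The register and
 * bit layout are invented; only the sequence mirrors the driver. */
#include <stdint.h>
#include <stdio.h>

#define VBLANK_STATUS   (1u << 1)
#define UNDERRUN_STATUS (1u << 5)
#define CRC_DONE_STATUS (1u << 9)

static uint32_t fake_pipestat = VBLANK_STATUS | UNDERRUN_STATUS;

static uint32_t read_reg(void)    { return fake_pipestat; }
static void write_reg(uint32_t v) { fake_pipestat &= ~v; } /* write-1-to-clear */

int main(void)
{
	/* only handle what we asked for; other bits latch but stay ignored */
	uint32_t mask = VBLANK_STATUS | UNDERRUN_STATUS;
	uint32_t stats = read_reg() & mask;

	if (stats)
		write_reg(stats); /* ack before processing, like the driver */

	if (stats & VBLANK_STATUS)
		puts("vblank");
	if (stats & UNDERRUN_STATUS)
		puts("fifo underrun");
	if (stats & CRC_DONE_STATUS)
		puts("crc done"); /* masked out: never reached here */
	return 0;
}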
16c6c56b VS |
1841 | static void i9xx_hpd_irq_handler(struct drm_device *dev) |
1842 | { | |
1843 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1844 | u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); | |
1845 | ||
3ff60f89 OM |
1846 | if (hotplug_status) { |
1847 | I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); | |
1848 | /* | |
1849 | * Make sure hotplug status is cleared before we clear IIR, or else we | |
1850 | * may miss hotplug events. | |
1851 | */ | |
1852 | POSTING_READ(PORT_HOTPLUG_STAT); | |
16c6c56b | 1853 | |
3ff60f89 OM |
1854 | if (IS_G4X(dev)) { |
1855 | u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_G4X; | |
16c6c56b | 1856 | |
13cf5504 | 1857 | intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_g4x); |
3ff60f89 OM |
1858 | } else { |
1859 | u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; | |
16c6c56b | 1860 | |
13cf5504 | 1861 | intel_hpd_irq_handler(dev, hotplug_trigger, 0, hpd_status_i915); |
3ff60f89 | 1862 | } |
16c6c56b | 1863 | |
3ff60f89 OM |
1864 | if ((IS_G4X(dev) || IS_VALLEYVIEW(dev)) && |
1865 | hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X) | |
1866 | dp_aux_irq_handler(dev); | |
1867 | } | |
16c6c56b VS |
1868 | } |
1869 | ||
ff1f525e | 1870 | static irqreturn_t valleyview_irq_handler(int irq, void *arg) |
7e231dbe | 1871 | { |
45a83f84 | 1872 | struct drm_device *dev = arg; |
2d1013dd | 1873 | struct drm_i915_private *dev_priv = dev->dev_private; |
7e231dbe JB |
1874 | u32 iir, gt_iir, pm_iir; |
1875 | irqreturn_t ret = IRQ_NONE; | |
7e231dbe | 1876 | |
7e231dbe | 1877 | while (true) { |
3ff60f89 OM |
1878 | /* Find, clear, then process each source of interrupt */ |
1879 | ||
7e231dbe | 1880 | gt_iir = I915_READ(GTIIR); |
3ff60f89 OM |
1881 | if (gt_iir) |
1882 | I915_WRITE(GTIIR, gt_iir); | |
1883 | ||
7e231dbe | 1884 | pm_iir = I915_READ(GEN6_PMIIR); |
3ff60f89 OM |
1885 | if (pm_iir) |
1886 | I915_WRITE(GEN6_PMIIR, pm_iir); | |
1887 | ||
1888 | iir = I915_READ(VLV_IIR); | |
1889 | if (iir) { | |
1890 | /* Consume port before clearing IIR or we'll miss events */ | |
1891 | if (iir & I915_DISPLAY_PORT_INTERRUPT) | |
1892 | i9xx_hpd_irq_handler(dev); | |
1893 | I915_WRITE(VLV_IIR, iir); | |
1894 | } | |
7e231dbe JB |
1895 | |
1896 | if (gt_iir == 0 && pm_iir == 0 && iir == 0) | |
1897 | goto out; | |
1898 | ||
1899 | ret = IRQ_HANDLED; | |
1900 | ||
3ff60f89 OM |
1901 | if (gt_iir) |
1902 | snb_gt_irq_handler(dev, dev_priv, gt_iir); | |
60611c13 | 1903 | if (pm_iir) |
d0ecd7e2 | 1904 | gen6_rps_irq_handler(dev_priv, pm_iir); |
3ff60f89 OM |
1905 | /* Call regardless, as some status bits might not be |
1906 | * signalled in iir */ | |
1907 | valleyview_pipestat_irq_handler(dev, iir); | |
7e231dbe JB |
1908 | } |
1909 | ||
1910 | out: | |
1911 | return ret; | |
1912 | } | |
1913 | ||
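valleyview_irq_handler drains interrupts in a loop: each IIR is read and cleared before its events are processed, so a source that re-fires mid-pass simply latches again and is caught on the next iteration, and the loop exits only when all three IIRs read zero. A stub model of that drain loop:

/* Model of the find/clear/process drain loop. Registers are stubs. */
#include <stdint.h>
#include <stdio.h>

static uint32_t gt_iir_reg = 0x1, pm_iir_reg = 0x0, iir_reg = 0x4;

static uint32_t read_clear(uint32_t *reg)
{
	uint32_t v = *reg;
	*reg = 0; /* clearing first lets a re-fire latch for the next pass */
	return v;
}

int main(void)
{
	for (;;) {
		uint32_t gt = read_clear(&gt_iir_reg);
		uint32_t pm = read_clear(&pm_iir_reg);
		uint32_t de = read_clear(&iir_reg);

		if (!gt && !pm && !de)
			break; /* all quiet: done */

		if (gt)
			printf("GT events 0x%x\n", gt);
		if (pm)
			printf("PM events 0x%x\n", pm);
		if (de)
			printf("display events 0x%x\n", de);
	}
	return 0;
}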
43f328d7 VS |
1914 | static irqreturn_t cherryview_irq_handler(int irq, void *arg) |
1915 | { | |
45a83f84 | 1916 | struct drm_device *dev = arg; |
43f328d7 VS |
1917 | struct drm_i915_private *dev_priv = dev->dev_private; |
1918 | u32 master_ctl, iir; | |
1919 | irqreturn_t ret = IRQ_NONE; | |
43f328d7 | 1920 | |
8e5fd599 VS |
1921 | for (;;) { |
1922 | master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; | |
1923 | iir = I915_READ(VLV_IIR); | |
43f328d7 | 1924 | |
8e5fd599 VS |
1925 | if (master_ctl == 0 && iir == 0) |
1926 | break; | |
43f328d7 | 1927 | |
27b6c122 OM |
1928 | ret = IRQ_HANDLED; |
1929 | ||
8e5fd599 | 1930 | I915_WRITE(GEN8_MASTER_IRQ, 0); |
43f328d7 | 1931 | |
27b6c122 | 1932 | /* Find, clear, then process each source of interrupt */ |
43f328d7 | 1933 | |
27b6c122 OM |
1934 | if (iir) { |
1935 | /* Consume port before clearing IIR or we'll miss events */ | |
1936 | if (iir & I915_DISPLAY_PORT_INTERRUPT) | |
1937 | i9xx_hpd_irq_handler(dev); | |
1938 | I915_WRITE(VLV_IIR, iir); | |
1939 | } | |
43f328d7 | 1940 | |
27b6c122 | 1941 | gen8_gt_irq_handler(dev, dev_priv, master_ctl); |
43f328d7 | 1942 | |
27b6c122 OM |
1943 | /* Call regardless, as some status bits might not be |
1944 | * signalled in iir */ | |
1945 | valleyview_pipestat_irq_handler(dev, iir); | |
43f328d7 | 1946 | |
8e5fd599 VS |
1947 | I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL); |
1948 | POSTING_READ(GEN8_MASTER_IRQ); | |
8e5fd599 | 1949 | } |
3278f67f | 1950 | |
43f328d7 VS |
1951 | return ret; |
1952 | } | |
1953 | ||
23e81d69 | 1954 | static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) |
776ad806 | 1955 | { |
2d1013dd | 1956 | struct drm_i915_private *dev_priv = dev->dev_private; |
9db4a9c7 | 1957 | int pipe; |
b543fb04 | 1958 | u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; |
13cf5504 DA |
1959 | u32 dig_hotplug_reg; |
1960 | ||
1961 | dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); | |
1962 | I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); | |
776ad806 | 1963 | |
13cf5504 | 1964 | intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_ibx); |
91d131d2 | 1965 | |
cfc33bf7 VS |
1966 | if (pch_iir & SDE_AUDIO_POWER_MASK) { |
1967 | int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> | |
1968 | SDE_AUDIO_POWER_SHIFT); | |
776ad806 | 1969 | DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", |
cfc33bf7 VS |
1970 | port_name(port)); |
1971 | } | |
776ad806 | 1972 | |
ce99c256 DV |
1973 | if (pch_iir & SDE_AUX_MASK) |
1974 | dp_aux_irq_handler(dev); | |
1975 | ||
776ad806 | 1976 | if (pch_iir & SDE_GMBUS) |
515ac2bb | 1977 | gmbus_irq_handler(dev); |
776ad806 JB |
1978 | |
1979 | if (pch_iir & SDE_AUDIO_HDCP_MASK) | |
1980 | DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); | |
1981 | ||
1982 | if (pch_iir & SDE_AUDIO_TRANS_MASK) | |
1983 | DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); | |
1984 | ||
1985 | if (pch_iir & SDE_POISON) | |
1986 | DRM_ERROR("PCH poison interrupt\n"); | |
1987 | ||
9db4a9c7 | 1988 | if (pch_iir & SDE_FDI_MASK) |
055e393f | 1989 | for_each_pipe(dev_priv, pipe) |
9db4a9c7 JB |
1990 | DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", |
1991 | pipe_name(pipe), | |
1992 | I915_READ(FDI_RX_IIR(pipe))); | |
776ad806 JB |
1993 | |
1994 | if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) | |
1995 | DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); | |
1996 | ||
1997 | if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) | |
1998 | DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); | |
1999 | ||
776ad806 | 2000 | if (pch_iir & SDE_TRANSA_FIFO_UNDER) |
1f7247c0 | 2001 | intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A); |
8664281b PZ |
2002 | |
2003 | if (pch_iir & SDE_TRANSB_FIFO_UNDER) | |
1f7247c0 | 2004 | intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); |
8664281b PZ |
2005 | } |
2006 | ||
2007 | static void ivb_err_int_handler(struct drm_device *dev) | |
2008 | { | |
2009 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2010 | u32 err_int = I915_READ(GEN7_ERR_INT); | |
5a69b89f | 2011 | enum pipe pipe; |
8664281b | 2012 | |
de032bf4 PZ |
2013 | if (err_int & ERR_INT_POISON) |
2014 | DRM_ERROR("Poison interrupt\n"); | |
2015 | ||
055e393f | 2016 | for_each_pipe(dev_priv, pipe) { |
1f7247c0 DV |
2017 | if (err_int & ERR_INT_FIFO_UNDERRUN(pipe)) |
2018 | intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); | |
8bf1e9f1 | 2019 | |
5a69b89f DV |
2020 | if (err_int & ERR_INT_PIPE_CRC_DONE(pipe)) { |
2021 | if (IS_IVYBRIDGE(dev)) | |
277de95e | 2022 | ivb_pipe_crc_irq_handler(dev, pipe); |
5a69b89f | 2023 | else |
277de95e | 2024 | hsw_pipe_crc_irq_handler(dev, pipe); |
5a69b89f DV |
2025 | } |
2026 | } | |
8bf1e9f1 | 2027 | |
8664281b PZ |
2028 | I915_WRITE(GEN7_ERR_INT, err_int); |
2029 | } | |
2030 | ||
2031 | static void cpt_serr_int_handler(struct drm_device *dev) | |
2032 | { | |
2033 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2034 | u32 serr_int = I915_READ(SERR_INT); | |
2035 | ||
de032bf4 PZ |
2036 | if (serr_int & SERR_INT_POISON) |
2037 | DRM_ERROR("PCH poison interrupt\n"); | |
2038 | ||
8664281b | 2039 | if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN) |
1f7247c0 | 2040 | intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_A); |
8664281b PZ |
2041 | |
2042 | if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN) | |
1f7247c0 | 2043 | intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_B); |
8664281b PZ |
2044 | |
2045 | if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN) | |
1f7247c0 | 2046 | intel_pch_fifo_underrun_irq_handler(dev_priv, TRANSCODER_C); |
8664281b PZ |
2047 | |
2048 | I915_WRITE(SERR_INT, serr_int); | |
776ad806 JB |
2049 | } |
2050 | ||
23e81d69 AJ |
2051 | static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) |
2052 | { | |
2d1013dd | 2053 | struct drm_i915_private *dev_priv = dev->dev_private; |
23e81d69 | 2054 | int pipe; |
b543fb04 | 2055 | u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; |
13cf5504 DA |
2056 | u32 dig_hotplug_reg; |
2057 | ||
2058 | dig_hotplug_reg = I915_READ(PCH_PORT_HOTPLUG); | |
2059 | I915_WRITE(PCH_PORT_HOTPLUG, dig_hotplug_reg); | |
23e81d69 | 2060 | |
13cf5504 | 2061 | intel_hpd_irq_handler(dev, hotplug_trigger, dig_hotplug_reg, hpd_cpt); |
91d131d2 | 2062 | |
cfc33bf7 VS |
2063 | if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { |
2064 | int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> | |
2065 | SDE_AUDIO_POWER_SHIFT_CPT); | |
2066 | DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", | |
2067 | port_name(port)); | |
2068 | } | |
23e81d69 AJ |
2069 | |
2070 | if (pch_iir & SDE_AUX_MASK_CPT) | |
ce99c256 | 2071 | dp_aux_irq_handler(dev); |
23e81d69 AJ |
2072 | |
2073 | if (pch_iir & SDE_GMBUS_CPT) | |
515ac2bb | 2074 | gmbus_irq_handler(dev); |
23e81d69 AJ |
2075 | |
2076 | if (pch_iir & SDE_AUDIO_CP_REQ_CPT) | |
2077 | DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); | |
2078 | ||
2079 | if (pch_iir & SDE_AUDIO_CP_CHG_CPT) | |
2080 | DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); | |
2081 | ||
2082 | if (pch_iir & SDE_FDI_MASK_CPT) | |
055e393f | 2083 | for_each_pipe(dev_priv, pipe) |
23e81d69 AJ |
2084 | DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", |
2085 | pipe_name(pipe), | |
2086 | I915_READ(FDI_RX_IIR(pipe))); | |
8664281b PZ |
2087 | |
2088 | if (pch_iir & SDE_ERROR_CPT) | |
2089 | cpt_serr_int_handler(dev); | |
23e81d69 AJ |
2090 | } |
2091 | ||
c008bc6e PZ |
2092 | static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir) |
2093 | { | |
2094 | struct drm_i915_private *dev_priv = dev->dev_private; | |
40da17c2 | 2095 | enum pipe pipe; |
c008bc6e PZ |
2096 | |
2097 | if (de_iir & DE_AUX_CHANNEL_A) | |
2098 | dp_aux_irq_handler(dev); | |
2099 | ||
2100 | if (de_iir & DE_GSE) | |
2101 | intel_opregion_asle_intr(dev); | |
2102 | ||
c008bc6e PZ |
2103 | if (de_iir & DE_POISON) |
2104 | DRM_ERROR("Poison interrupt\n"); | |
2105 | ||
055e393f | 2106 | for_each_pipe(dev_priv, pipe) { |
d6bbafa1 CW |
2107 | if (de_iir & DE_PIPE_VBLANK(pipe) && |
2108 | intel_pipe_handle_vblank(dev, pipe)) | |
2109 | intel_check_page_flip(dev, pipe); | |
5b3a856b | 2110 | |
40da17c2 | 2111 | if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe)) |
1f7247c0 | 2112 | intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); |
5b3a856b | 2113 | |
40da17c2 DV |
2114 | if (de_iir & DE_PIPE_CRC_DONE(pipe)) |
2115 | i9xx_pipe_crc_irq_handler(dev, pipe); | |
c008bc6e | 2116 | |
40da17c2 DV |
2117 | /* plane/pipes map 1:1 on ilk+ */ |
2118 | if (de_iir & DE_PLANE_FLIP_DONE(pipe)) { | |
2119 | intel_prepare_page_flip(dev, pipe); | |
2120 | intel_finish_page_flip_plane(dev, pipe); | |
2121 | } | |
c008bc6e PZ |
2122 | } |
2123 | ||
2124 | /* check event from PCH */ | |
2125 | if (de_iir & DE_PCH_EVENT) { | |
2126 | u32 pch_iir = I915_READ(SDEIIR); | |
2127 | ||
2128 | if (HAS_PCH_CPT(dev)) | |
2129 | cpt_irq_handler(dev, pch_iir); | |
2130 | else | |
2131 | ibx_irq_handler(dev, pch_iir); | |
2132 | ||
2133 | /* should clear PCH hotplug event before clearing CPU irq */ |
2134 | I915_WRITE(SDEIIR, pch_iir); | |
2135 | } | |
2136 | ||
2137 | if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) | |
2138 | ironlake_rps_change_irq_handler(dev); | |
2139 | } | |
2140 | ||
9719fb98 PZ |
2141 | static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) |
2142 | { | |
2143 | struct drm_i915_private *dev_priv = dev->dev_private; | |
07d27e20 | 2144 | enum pipe pipe; |
9719fb98 PZ |
2145 | |
2146 | if (de_iir & DE_ERR_INT_IVB) | |
2147 | ivb_err_int_handler(dev); | |
2148 | ||
2149 | if (de_iir & DE_AUX_CHANNEL_A_IVB) | |
2150 | dp_aux_irq_handler(dev); | |
2151 | ||
2152 | if (de_iir & DE_GSE_IVB) | |
2153 | intel_opregion_asle_intr(dev); | |
2154 | ||
055e393f | 2155 | for_each_pipe(dev_priv, pipe) { |
d6bbafa1 CW |
2156 | if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)) && |
2157 | intel_pipe_handle_vblank(dev, pipe)) | |
2158 | intel_check_page_flip(dev, pipe); | |
40da17c2 DV |
2159 | |
2160 | /* plane/pipes map 1:1 on ilk+ */ | |
07d27e20 DL |
2161 | if (de_iir & DE_PLANE_FLIP_DONE_IVB(pipe)) { |
2162 | intel_prepare_page_flip(dev, pipe); | |
2163 | intel_finish_page_flip_plane(dev, pipe); | |
9719fb98 PZ |
2164 | } |
2165 | } | |
2166 | ||
2167 | /* check event from PCH */ | |
2168 | if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) { | |
2169 | u32 pch_iir = I915_READ(SDEIIR); | |
2170 | ||
2171 | cpt_irq_handler(dev, pch_iir); | |
2172 | ||
2173 | /* clear PCH hotplug event before clearing CPU irq */ |
2174 | I915_WRITE(SDEIIR, pch_iir); | |
2175 | } | |
2176 | } | |
2177 | ||
72c90f62 OM |
2178 | /* |
2179 | * To handle irqs with the minimum potential races with fresh interrupts, we: | |
2180 | * 1 - Disable Master Interrupt Control. | |
2181 | * 2 - Find the source(s) of the interrupt. | |
2182 | * 3 - Clear the Interrupt Identity bits (IIR). | |
2183 | * 4 - Process the interrupt(s) that had bits set in the IIRs. | |
2184 | * 5 - Re-enable Master Interrupt Control. | |
2185 | */ | |
f1af8fc1 | 2186 | static irqreturn_t ironlake_irq_handler(int irq, void *arg) |
b1f14ad0 | 2187 | { |
45a83f84 | 2188 | struct drm_device *dev = arg; |
2d1013dd | 2189 | struct drm_i915_private *dev_priv = dev->dev_private; |
f1af8fc1 | 2190 | u32 de_iir, gt_iir, de_ier, sde_ier = 0; |
0e43406b | 2191 | irqreturn_t ret = IRQ_NONE; |
b1f14ad0 | 2192 | |
8664281b PZ |
2193 | /* We get interrupts on unclaimed registers, so check for this before we |
2194 | * do any I915_{READ,WRITE}. */ | |
907b28c5 | 2195 | intel_uncore_check_errors(dev); |
8664281b | 2196 | |
b1f14ad0 JB |
2197 | /* disable master interrupt before clearing iir */ |
2198 | de_ier = I915_READ(DEIER); | |
2199 | I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); | |
23a78516 | 2200 | POSTING_READ(DEIER); |
b1f14ad0 | 2201 | |
44498aea PZ |
2202 | /* Disable south interrupts. We'll only write to SDEIIR once, so further |
2203 | * interrupts will will be stored on its back queue, and then we'll be | |
2204 | * able to process them after we restore SDEIER (as soon as we restore | |
2205 | * it, we'll get an interrupt if SDEIIR still has something to process | |
2206 | * due to its back queue). */ | |
ab5c608b BW |
2207 | if (!HAS_PCH_NOP(dev)) { |
2208 | sde_ier = I915_READ(SDEIER); | |
2209 | I915_WRITE(SDEIER, 0); | |
2210 | POSTING_READ(SDEIER); | |
2211 | } | |
44498aea | 2212 | |
72c90f62 OM |
2213 | /* Find, clear, then process each source of interrupt */ |
2214 | ||
b1f14ad0 | 2215 | gt_iir = I915_READ(GTIIR); |
0e43406b | 2216 | if (gt_iir) { |
72c90f62 OM |
2217 | I915_WRITE(GTIIR, gt_iir); |
2218 | ret = IRQ_HANDLED; | |
d8fc8a47 | 2219 | if (INTEL_INFO(dev)->gen >= 6) |
f1af8fc1 | 2220 | snb_gt_irq_handler(dev, dev_priv, gt_iir); |
d8fc8a47 PZ |
2221 | else |
2222 | ilk_gt_irq_handler(dev, dev_priv, gt_iir); | |
b1f14ad0 JB |
2223 | } |
2224 | ||
0e43406b CW |
2225 | de_iir = I915_READ(DEIIR); |
2226 | if (de_iir) { | |
72c90f62 OM |
2227 | I915_WRITE(DEIIR, de_iir); |
2228 | ret = IRQ_HANDLED; | |
f1af8fc1 PZ |
2229 | if (INTEL_INFO(dev)->gen >= 7) |
2230 | ivb_display_irq_handler(dev, de_iir); | |
2231 | else | |
2232 | ilk_display_irq_handler(dev, de_iir); | |
b1f14ad0 JB |
2233 | } |
2234 | ||
f1af8fc1 PZ |
2235 | if (INTEL_INFO(dev)->gen >= 6) { |
2236 | u32 pm_iir = I915_READ(GEN6_PMIIR); | |
2237 | if (pm_iir) { | |
f1af8fc1 PZ |
2238 | I915_WRITE(GEN6_PMIIR, pm_iir); |
2239 | ret = IRQ_HANDLED; | |
72c90f62 | 2240 | gen6_rps_irq_handler(dev_priv, pm_iir); |
f1af8fc1 | 2241 | } |
0e43406b | 2242 | } |
b1f14ad0 | 2243 | |
b1f14ad0 JB |
2244 | I915_WRITE(DEIER, de_ier); |
2245 | POSTING_READ(DEIER); | |
ab5c608b BW |
2246 | if (!HAS_PCH_NOP(dev)) { |
2247 | I915_WRITE(SDEIER, sde_ier); | |
2248 | POSTING_READ(SDEIER); | |
2249 | } | |
b1f14ad0 JB |
2250 | |
2251 | return ret; | |
2252 | } | |
2253 | ||
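The five steps in the comment above bracket all IIR work between disabling the master bit in DEIER (plus saving and zeroing SDEIER) and restoring both at the end, so a PCH interrupt arriving mid-handler parks on SDEIIR's back queue and re-fires on restore. A stub model of the bracket, with plain variables standing in for MMIO registers:

/* Model of the disable-master / process / restore bracket.
 * All registers and values here are illustrative stand-ins. */
#include <stdint.h>
#include <stdio.h>

#define MASTER_IRQ_CONTROL (1u << 31)

static uint32_t deier = MASTER_IRQ_CONTROL | 0xff;
static uint32_t sdeier = 0x3;
static uint32_t deiir = 0x10, sdeiir = 0x1;

int main(void)
{
	/* 1) disable master interrupt and south interrupts */
	uint32_t saved_deier = deier;
	uint32_t saved_sdeier = sdeier;
	deier &= ~MASTER_IRQ_CONTROL;
	sdeier = 0;

	/* 2-4) find, clear, then process each source of interrupt */
	if (deiir) {
		uint32_t v = deiir;
		deiir = 0;
		printf("display events 0x%x\n", v);
	}
	if (sdeiir) {
		uint32_t v = sdeiir;
		sdeiir = 0;
		printf("south (PCH) events 0x%x\n", v);
	}

	/* 5) restore; anything that latched meanwhile fires now */
	deier = saved_deier;
	sdeier = saved_sdeier;
	return 0;
}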
abd58f01 BW |
2254 | static irqreturn_t gen8_irq_handler(int irq, void *arg) |
2255 | { | |
2256 | struct drm_device *dev = arg; | |
2257 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2258 | u32 master_ctl; | |
2259 | irqreturn_t ret = IRQ_NONE; | |
2260 | uint32_t tmp = 0; | |
c42664cc | 2261 | enum pipe pipe; |
88e04703 JB |
2262 | u32 aux_mask = GEN8_AUX_CHANNEL_A; |
2263 | ||
2264 | if (IS_GEN9(dev)) | |
2265 | aux_mask |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | | |
2266 | GEN9_AUX_CHANNEL_D; | |
abd58f01 | 2267 | |
abd58f01 BW |
2268 | master_ctl = I915_READ(GEN8_MASTER_IRQ); |
2269 | master_ctl &= ~GEN8_MASTER_IRQ_CONTROL; | |
2270 | if (!master_ctl) | |
2271 | return IRQ_NONE; | |
2272 | ||
2273 | I915_WRITE(GEN8_MASTER_IRQ, 0); | |
2274 | POSTING_READ(GEN8_MASTER_IRQ); | |
2275 | ||
38cc46d7 OM |
2276 | /* Find, clear, then process each source of interrupt */ |
2277 | ||
abd58f01 BW |
2278 | ret = gen8_gt_irq_handler(dev, dev_priv, master_ctl); |
2279 | ||
2280 | if (master_ctl & GEN8_DE_MISC_IRQ) { | |
2281 | tmp = I915_READ(GEN8_DE_MISC_IIR); | |
abd58f01 BW |
2282 | if (tmp) { |
2283 | I915_WRITE(GEN8_DE_MISC_IIR, tmp); | |
2284 | ret = IRQ_HANDLED; | |
38cc46d7 OM |
2285 | if (tmp & GEN8_DE_MISC_GSE) |
2286 | intel_opregion_asle_intr(dev); | |
2287 | else | |
2288 | DRM_ERROR("Unexpected DE Misc interrupt\n"); | |
abd58f01 | 2289 | } |
38cc46d7 OM |
2290 | else |
2291 | DRM_ERROR("The master control interrupt lied (DE MISC)!\n"); | |
abd58f01 BW |
2292 | } |
2293 | ||
6d766f02 DV |
2294 | if (master_ctl & GEN8_DE_PORT_IRQ) { |
2295 | tmp = I915_READ(GEN8_DE_PORT_IIR); | |
6d766f02 DV |
2296 | if (tmp) { |
2297 | I915_WRITE(GEN8_DE_PORT_IIR, tmp); | |
2298 | ret = IRQ_HANDLED; | |
88e04703 JB |
2299 | |
2300 | if (tmp & aux_mask) | |
38cc46d7 OM |
2301 | dp_aux_irq_handler(dev); |
2302 | else | |
2303 | DRM_ERROR("Unexpected DE Port interrupt\n"); | |
6d766f02 | 2304 | } |
38cc46d7 OM |
2305 | else |
2306 | DRM_ERROR("The master control interrupt lied (DE PORT)!\n"); | |
6d766f02 DV |
2307 | } |
2308 | ||
055e393f | 2309 | for_each_pipe(dev_priv, pipe) { |
770de83d | 2310 | uint32_t pipe_iir, flip_done = 0, fault_errors = 0; |
abd58f01 | 2311 | |
c42664cc DV |
2312 | if (!(master_ctl & GEN8_DE_PIPE_IRQ(pipe))) |
2313 | continue; | |
abd58f01 | 2314 | |
c42664cc | 2315 | pipe_iir = I915_READ(GEN8_DE_PIPE_IIR(pipe)); |
c42664cc DV |
2316 | if (pipe_iir) { |
2317 | ret = IRQ_HANDLED; | |
2318 | I915_WRITE(GEN8_DE_PIPE_IIR(pipe), pipe_iir); | |
770de83d | 2319 | |
d6bbafa1 CW |
2320 | if (pipe_iir & GEN8_PIPE_VBLANK && |
2321 | intel_pipe_handle_vblank(dev, pipe)) | |
2322 | intel_check_page_flip(dev, pipe); | |
38cc46d7 | 2323 | |
770de83d DL |
2324 | if (IS_GEN9(dev)) |
2325 | flip_done = pipe_iir & GEN9_PIPE_PLANE1_FLIP_DONE; | |
2326 | else | |
2327 | flip_done = pipe_iir & GEN8_PIPE_PRIMARY_FLIP_DONE; | |
2328 | ||
2329 | if (flip_done) { | |
38cc46d7 OM |
2330 | intel_prepare_page_flip(dev, pipe); |
2331 | intel_finish_page_flip_plane(dev, pipe); | |
2332 | } | |
2333 | ||
2334 | if (pipe_iir & GEN8_PIPE_CDCLK_CRC_DONE) | |
2335 | hsw_pipe_crc_irq_handler(dev, pipe); | |
2336 | ||
1f7247c0 DV |
2337 | if (pipe_iir & GEN8_PIPE_FIFO_UNDERRUN) |
2338 | intel_cpu_fifo_underrun_irq_handler(dev_priv, | |
2339 | pipe); | |
38cc46d7 | 2340 | |
770de83d DL |
2341 | |
2342 | if (IS_GEN9(dev)) | |
2343 | fault_errors = pipe_iir & GEN9_DE_PIPE_IRQ_FAULT_ERRORS; | |
2344 | else | |
2345 | fault_errors = pipe_iir & GEN8_DE_PIPE_IRQ_FAULT_ERRORS; | |
2346 | ||
2347 | if (fault_errors) | |
38cc46d7 OM |
2348 | DRM_ERROR("Fault errors on pipe %c: 0x%08x\n", |
2349 | pipe_name(pipe), |
2350 | fault_errors); |
c42664cc | 2351 | } else |
abd58f01 BW |
2352 | DRM_ERROR("The master control interrupt lied (DE PIPE)!\n"); |
2353 | } | |
2354 | ||
92d03a80 DV |
2355 | if (!HAS_PCH_NOP(dev) && master_ctl & GEN8_DE_PCH_IRQ) { |
2356 | /* | |
2357 | * FIXME(BDW): Assume for now that the new interrupt handling | |
2358 | * scheme also closed the SDE interrupt handling race we've seen | |
2359 | * on older pch-split platforms. But this needs testing. | |
2360 | */ | |
2361 | u32 pch_iir = I915_READ(SDEIIR); | |
92d03a80 DV |
2362 | if (pch_iir) { |
2363 | I915_WRITE(SDEIIR, pch_iir); | |
2364 | ret = IRQ_HANDLED; | |
38cc46d7 OM |
2365 | cpt_irq_handler(dev, pch_iir); |
2366 | } else | |
2367 | DRM_ERROR("The master control interrupt lied (SDE)!\n"); | |
2368 | ||
92d03a80 DV |
2369 | } |
2370 | ||
abd58f01 BW |
2371 | I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); |
2372 | POSTING_READ(GEN8_MASTER_IRQ); | |
2373 | ||
2374 | return ret; | |
2375 | } | |
2376 | ||
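A pattern repeated through the gen8 paths is cross-checking the master control register against each bank's IIR: a set master bit whose IIR reads zero earns the "master control interrupt lied" error. A small model of that consistency check (bit layout invented):

/* Model of the master-vs-IIR consistency check. Bits are invented. */
#include <stdint.h>
#include <stdio.h>

#define MASTER_GT_IRQ (1u << 0)
#define MASTER_DE_IRQ (1u << 1)

static uint32_t gt_iir = 0x0;  /* nothing latched: the master lied */
static uint32_t de_iir = 0x8;

static void check_bank(uint32_t master, uint32_t bit,
		       uint32_t iir, const char *name)
{
	if (!(master & bit))
		return;          /* bank not flagged: skip it entirely */
	if (iir)
		printf("%s events 0x%x\n", name, iir);
	else
		printf("The master control interrupt lied (%s)!\n", name);
}

int main(void)
{
	uint32_t master_ctl = MASTER_GT_IRQ | MASTER_DE_IRQ;

	check_bank(master_ctl, MASTER_GT_IRQ, gt_iir, "GT");
	check_bank(master_ctl, MASTER_DE_IRQ, de_iir, "DE");
	return 0;
}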
17e1df07 DV |
2377 | static void i915_error_wake_up(struct drm_i915_private *dev_priv, |
2378 | bool reset_completed) | |
2379 | { | |
a4872ba6 | 2380 | struct intel_engine_cs *ring; |
17e1df07 DV |
2381 | int i; |
2382 | ||
2383 | /* | |
2384 | * Notify all waiters for GPU completion events that reset state has | |
2385 | * been changed, and that they need to restart their wait after | |
2386 | * checking for potential errors (and bail out to drop locks if there is | |
2387 | * a gpu reset pending so that i915_error_work_func can acquire them). | |
2388 | */ | |
2389 | ||
2390 | /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */ | |
2391 | for_each_ring(ring, dev_priv, i) | |
2392 | wake_up_all(&ring->irq_queue); | |
2393 | ||
2394 | /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */ | |
2395 | wake_up_all(&dev_priv->pending_flip_queue); | |
2396 | ||
2397 | /* | |
2398 | * Signal tasks blocked in i915_gem_wait_for_error that the pending | |
2399 | * reset state is cleared. | |
2400 | */ | |
2401 | if (reset_completed) | |
2402 | wake_up_all(&dev_priv->gpu_error.reset_queue); | |
2403 | } | |
2404 | ||
8a905236 JB |
2405 | /** |
2406 | * i915_error_work_func - do process context error handling work | |
2407 | * @work: work struct | |
2408 | * | |
2409 | * Fire an error uevent so userspace can see that a hang or error | |
2410 | * was detected. | |
2411 | */ | |
2412 | static void i915_error_work_func(struct work_struct *work) | |
2413 | { | |
1f83fee0 DV |
2414 | struct i915_gpu_error *error = container_of(work, struct i915_gpu_error, |
2415 | work); | |
2d1013dd JN |
2416 | struct drm_i915_private *dev_priv = |
2417 | container_of(error, struct drm_i915_private, gpu_error); | |
8a905236 | 2418 | struct drm_device *dev = dev_priv->dev; |
cce723ed BW |
2419 | char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; |
2420 | char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; | |
2421 | char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; | |
17e1df07 | 2422 | int ret; |
8a905236 | 2423 | |
5bdebb18 | 2424 | kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, error_event); |
f316a42c | 2425 | |
7db0ba24 DV |
2426 | /* |
2427 | * Note that there's only one work item which does gpu resets, so we | |
2428 | * need not worry about concurrent gpu resets potentially incrementing | |
2429 | * error->reset_counter twice. We only need to take care of another | |
2430 | * racing irq/hangcheck declaring the gpu dead for a second time. A | |
2431 | * quick check for that is good enough: schedule_work ensures the | |
2432 | * correct ordering between hang detection and this work item, and since | |
2433 | * the reset in-progress bit is only ever set by code outside of this | |
2434 | * work we don't need to worry about any other races. | |
2435 | */ | |
2436 | if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) { | |
f803aa55 | 2437 | DRM_DEBUG_DRIVER("resetting chip\n"); |
5bdebb18 | 2438 | kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, |
7db0ba24 | 2439 | reset_event); |
1f83fee0 | 2440 | |
f454c694 ID |
2441 | /* |
2442 | * In most cases it's guaranteed that we get here with an RPM | |
2443 | * reference held, for example because there is a pending GPU | |
2444 | * request that won't finish until the reset is done. This | |
2445 | * isn't the case at least when we get here by doing a | |
2446 | * simulated reset via debugfs, so get an RPM reference. |
2447 | */ | |
2448 | intel_runtime_pm_get(dev_priv); | |
7514747d VS |
2449 | |
2450 | intel_prepare_reset(dev); | |
2451 | ||
17e1df07 DV |
2452 | /* |
2453 | * All state reset _must_ be completed before we update the | |
2454 | * reset counter, for otherwise waiters might miss the reset | |
2455 | * pending state and not properly drop locks, resulting in | |
2456 | * deadlocks with the reset work. | |
2457 | */ | |
f69061be DV |
2458 | ret = i915_reset(dev); |
2459 | ||
7514747d | 2460 | intel_finish_reset(dev); |
17e1df07 | 2461 | |
f454c694 ID |
2462 | intel_runtime_pm_put(dev_priv); |
2463 | ||
f69061be DV |
2464 | if (ret == 0) { |
2465 | /* | |
2466 | * After all the gem state is reset, increment the reset | |
2467 | * counter and wake up everyone waiting for the reset to | |
2468 | * complete. | |
2469 | * | |
2470 | * Since unlock operations are a one-sided barrier only, | |
2471 | * we need to insert a barrier here to order any seqno | |
2472 | * updates before | |
2473 | * the counter increment. | |
2474 | */ | |
4e857c58 | 2475 | smp_mb__before_atomic(); |
f69061be DV |
2476 | atomic_inc(&dev_priv->gpu_error.reset_counter); |
2477 | ||
5bdebb18 | 2478 | kobject_uevent_env(&dev->primary->kdev->kobj, |
f69061be | 2479 | KOBJ_CHANGE, reset_done_event); |
1f83fee0 | 2480 | } else { |
2ac0f450 | 2481 | atomic_set_mask(I915_WEDGED, &error->reset_counter); |
f316a42c | 2482 | } |
1f83fee0 | 2483 | |
17e1df07 DV |
2484 | /* |
2485 | * Note: The wake_up also serves as a memory barrier so that | |
2486 | * waiters see the update value of the reset counter atomic_t. | |
2487 | */ | |
2488 | i915_error_wake_up(dev_priv, true); | |
f316a42c | 2489 | } |
8a905236 JB |
2490 | } |
2491 | ||
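The reset path encodes its state in one counter: in this era of the driver the low bit (I915_RESET_IN_PROGRESS_FLAG) makes the counter odd while a reset is pending, I915_WEDGED occupies the top bit, and the increment on success makes the counter even again, publishing completion to waiters. A sketch of those semantics with C11 atomics, assuming that odd/even scheme:

/* Model of an odd/even reset counter: odd = reset in progress, the
 * top bit = terminally wedged. Bit values mirror the scheme the
 * comments above rely on; the rest is illustrative. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define WEDGED (1u << 31)

static atomic_uint reset_counter;

static bool reset_in_progress(void)
{
	return atomic_load(&reset_counter) & 1;
}

static void declare_hang(void)
{
	atomic_fetch_or(&reset_counter, 1);   /* odd: reset pending */
	/* wake all waiters here so they drop locks and back off */
}

static void finish_reset(bool ok)
{
	if (ok)
		atomic_fetch_add(&reset_counter, 1); /* even again */
	else
		atomic_fetch_or(&reset_counter, WEDGED);
}

int main(void)
{
	declare_hang();
	printf("in progress: %d\n", reset_in_progress());
	finish_reset(true);
	printf("in progress: %d\n", reset_in_progress());
	return 0;
}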
35aed2e6 | 2492 | static void i915_report_and_clear_eir(struct drm_device *dev) |
8a905236 JB |
2493 | { |
2494 | struct drm_i915_private *dev_priv = dev->dev_private; | |
bd9854f9 | 2495 | uint32_t instdone[I915_NUM_INSTDONE_REG]; |
8a905236 | 2496 | u32 eir = I915_READ(EIR); |
050ee91f | 2497 | int pipe, i; |
8a905236 | 2498 | |
35aed2e6 CW |
2499 | if (!eir) |
2500 | return; | |
8a905236 | 2501 | |
a70491cc | 2502 | pr_err("render error detected, EIR: 0x%08x\n", eir); |
8a905236 | 2503 | |
bd9854f9 BW |
2504 | i915_get_extra_instdone(dev, instdone); |
2505 | ||
8a905236 JB |
2506 | if (IS_G4X(dev)) { |
2507 | if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { | |
2508 | u32 ipeir = I915_READ(IPEIR_I965); | |
2509 | ||
a70491cc JP |
2510 | pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); |
2511 | pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); | |
050ee91f BW |
2512 | for (i = 0; i < ARRAY_SIZE(instdone); i++) |
2513 | pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); | |
a70491cc | 2514 | pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); |
a70491cc | 2515 | pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); |
8a905236 | 2516 | I915_WRITE(IPEIR_I965, ipeir); |
3143a2bf | 2517 | POSTING_READ(IPEIR_I965); |
8a905236 JB |
2518 | } |
2519 | if (eir & GM45_ERROR_PAGE_TABLE) { | |
2520 | u32 pgtbl_err = I915_READ(PGTBL_ER); | |
a70491cc JP |
2521 | pr_err("page table error\n"); |
2522 | pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); | |
8a905236 | 2523 | I915_WRITE(PGTBL_ER, pgtbl_err); |
3143a2bf | 2524 | POSTING_READ(PGTBL_ER); |
8a905236 JB |
2525 | } |
2526 | } | |
2527 | ||
a6c45cf0 | 2528 | if (!IS_GEN2(dev)) { |
8a905236 JB |
2529 | if (eir & I915_ERROR_PAGE_TABLE) { |
2530 | u32 pgtbl_err = I915_READ(PGTBL_ER); | |
a70491cc JP |
2531 | pr_err("page table error\n"); |
2532 | pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); | |
8a905236 | 2533 | I915_WRITE(PGTBL_ER, pgtbl_err); |
3143a2bf | 2534 | POSTING_READ(PGTBL_ER); |
8a905236 JB |
2535 | } |
2536 | } | |
2537 | ||
2538 | if (eir & I915_ERROR_MEMORY_REFRESH) { | |
a70491cc | 2539 | pr_err("memory refresh error:\n"); |
055e393f | 2540 | for_each_pipe(dev_priv, pipe) |
a70491cc | 2541 | pr_err("pipe %c stat: 0x%08x\n", |
9db4a9c7 | 2542 | pipe_name(pipe), I915_READ(PIPESTAT(pipe))); |
8a905236 JB |
2543 | /* pipestat has already been acked */ |
2544 | } | |
2545 | if (eir & I915_ERROR_INSTRUCTION) { | |
a70491cc JP |
2546 | pr_err("instruction error\n"); |
2547 | pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); | |
050ee91f BW |
2548 | for (i = 0; i < ARRAY_SIZE(instdone); i++) |
2549 | pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); | |
a6c45cf0 | 2550 | if (INTEL_INFO(dev)->gen < 4) { |
8a905236 JB |
2551 | u32 ipeir = I915_READ(IPEIR); |
2552 | ||
a70491cc JP |
2553 | pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); |
2554 | pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); | |
a70491cc | 2555 | pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); |
8a905236 | 2556 | I915_WRITE(IPEIR, ipeir); |
3143a2bf | 2557 | POSTING_READ(IPEIR); |
8a905236 JB |
2558 | } else { |
2559 | u32 ipeir = I915_READ(IPEIR_I965); | |
2560 | ||
a70491cc JP |
2561 | pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); |
2562 | pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); | |
a70491cc | 2563 | pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); |
a70491cc | 2564 | pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); |
8a905236 | 2565 | I915_WRITE(IPEIR_I965, ipeir); |
3143a2bf | 2566 | POSTING_READ(IPEIR_I965); |
8a905236 JB |
2567 | } |
2568 | } | |
2569 | ||
2570 | I915_WRITE(EIR, eir); | |
3143a2bf | 2571 | POSTING_READ(EIR); |
8a905236 JB |
2572 | eir = I915_READ(EIR); |
2573 | if (eir) { | |
2574 | /* | |
2575 | * some errors might have become stuck, | |
2576 | * mask them. | |
2577 | */ | |
2578 | DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); | |
2579 | I915_WRITE(EMR, I915_READ(EMR) | eir); | |
2580 | I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); | |
2581 | } | |
35aed2e6 CW |
2582 | } |
2583 | ||
2584 | /** | |
2585 | * i915_handle_error - handle an error interrupt | |
2586 | * @dev: drm device | |
2587 | * | |
2588 | * Do some basic checking of register state at error interrupt time and |
2589 | * dump it to the syslog. Also call i915_capture_error_state() to make | |
2590 | * sure we get a record and make it available in debugfs. Fire a uevent | |
2591 | * so userspace knows something bad happened (should trigger collection | |
2592 | * of a ring dump etc.). | |
2593 | */ | |
58174462 MK |
2594 | void i915_handle_error(struct drm_device *dev, bool wedged, |
2595 | const char *fmt, ...) | |
35aed2e6 CW |
2596 | { |
2597 | struct drm_i915_private *dev_priv = dev->dev_private; | |
58174462 MK |
2598 | va_list args; |
2599 | char error_msg[80]; | |
35aed2e6 | 2600 | |
58174462 MK |
2601 | va_start(args, fmt); |
2602 | vscnprintf(error_msg, sizeof(error_msg), fmt, args); | |
2603 | va_end(args); | |
2604 | ||
2605 | i915_capture_error_state(dev, wedged, error_msg); | |
35aed2e6 | 2606 | i915_report_and_clear_eir(dev); |
8a905236 | 2607 | |
ba1234d1 | 2608 | if (wedged) { |
f69061be DV |
2609 | atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG, |
2610 | &dev_priv->gpu_error.reset_counter); | |
ba1234d1 | 2611 | |
11ed50ec | 2612 | /* |
17e1df07 DV |
2613 | * Wakeup waiting processes so that the reset work function |
2614 | * i915_error_work_func doesn't deadlock trying to grab various | |
2615 | * locks. By bumping the reset counter first, the woken | |
2616 | * processes will see a reset in progress and back off, | |
2617 | * releasing their locks and then wait for the reset completion. | |
2618 | * We must do this for _all_ gpu waiters that might hold locks | |
2619 | * that the reset work needs to acquire. | |
2620 | * | |
2621 | * Note: The wake_up serves as the required memory barrier to | |
2622 | * ensure that the waiters see the updated value of the reset | |
2623 | * counter atomic_t. | |
11ed50ec | 2624 | */ |
17e1df07 | 2625 | i915_error_wake_up(dev_priv, false); |
11ed50ec BG |
2626 | } |
2627 | ||
122f46ba DV |
2628 | /* |
2629 | * Our reset work can grab modeset locks (since it needs to reset the | |
2630 | * state of outstanding pageflips). Hence it must not be run on our own |
2631 | * dev_priv->wq work queue for otherwise the flush_work in the pageflip |
2632 | * code will deadlock. | |
2633 | */ | |
2634 | schedule_work(&dev_priv->gpu_error.work); | |
8a905236 JB |
2635 | } |
2636 | ||
42f52ef8 KP |
2637 | /* Called from drm generic code, passed 'crtc' which |
2638 | * we use as a pipe index | |
2639 | */ | |
f71d4af4 | 2640 | static int i915_enable_vblank(struct drm_device *dev, int pipe) |
0a3e67a4 | 2641 | { |
2d1013dd | 2642 | struct drm_i915_private *dev_priv = dev->dev_private; |
e9d21d7f | 2643 | unsigned long irqflags; |
71e0ffa5 | 2644 | |
5eddb70b | 2645 | if (!i915_pipe_enabled(dev, pipe)) |
71e0ffa5 | 2646 | return -EINVAL; |
0a3e67a4 | 2647 | |
1ec14ad3 | 2648 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
f796cf8f | 2649 | if (INTEL_INFO(dev)->gen >= 4) |
7c463586 | 2650 | i915_enable_pipestat(dev_priv, pipe, |
755e9019 | 2651 | PIPE_START_VBLANK_INTERRUPT_STATUS); |
e9d21d7f | 2652 | else |
7c463586 | 2653 | i915_enable_pipestat(dev_priv, pipe, |
755e9019 | 2654 | PIPE_VBLANK_INTERRUPT_STATUS); |
1ec14ad3 | 2655 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
8692d00e | 2656 | |
0a3e67a4 JB |
2657 | return 0; |
2658 | } | |
2659 | ||
f71d4af4 | 2660 | static int ironlake_enable_vblank(struct drm_device *dev, int pipe) |
f796cf8f | 2661 | { |
2d1013dd | 2662 | struct drm_i915_private *dev_priv = dev->dev_private; |
f796cf8f | 2663 | unsigned long irqflags; |
b518421f | 2664 | uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) : |
40da17c2 | 2665 | DE_PIPE_VBLANK(pipe); |
f796cf8f JB |
2666 | |
2667 | if (!i915_pipe_enabled(dev, pipe)) | |
2668 | return -EINVAL; | |
2669 | ||
2670 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
b518421f | 2671 | ironlake_enable_display_irq(dev_priv, bit); |
b1f14ad0 JB |
2672 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
2673 | ||
2674 | return 0; | |
2675 | } | |
2676 | ||
7e231dbe JB |
2677 | static int valleyview_enable_vblank(struct drm_device *dev, int pipe) |
2678 | { | |
2d1013dd | 2679 | struct drm_i915_private *dev_priv = dev->dev_private; |
7e231dbe | 2680 | unsigned long irqflags; |
7e231dbe JB |
2681 | |
2682 | if (!i915_pipe_enabled(dev, pipe)) | |
2683 | return -EINVAL; | |
2684 | ||
2685 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
31acc7f5 | 2686 | i915_enable_pipestat(dev_priv, pipe, |
755e9019 | 2687 | PIPE_START_VBLANK_INTERRUPT_STATUS); |
7e231dbe JB |
2688 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
2689 | ||
2690 | return 0; | |
2691 | } | |
2692 | ||
abd58f01 BW |
2693 | static int gen8_enable_vblank(struct drm_device *dev, int pipe) |
2694 | { | |
2695 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2696 | unsigned long irqflags; | |
abd58f01 BW |
2697 | |
2698 | if (!i915_pipe_enabled(dev, pipe)) | |
2699 | return -EINVAL; | |
2700 | ||
2701 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
7167d7c6 DV |
2702 | dev_priv->de_irq_mask[pipe] &= ~GEN8_PIPE_VBLANK; |
2703 | I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); | |
2704 | POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); | |
abd58f01 BW |
2705 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
2706 | return 0; | |
2707 | } | |
2708 | ||
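All of the enable/disable pairs above share one move: under the irq lock, flip the pipe's bit in a cached interrupt-mask word, write the word to hardware, and post the write. A model of that cached-IMR update with the register as a plain variable:

/* Model of cached interrupt-mask updates for vblank on/off.
 * The mask word and bit layout stand in for de_irq_mask/IMR. */
#include <stdint.h>
#include <stdio.h>

#define PIPE_VBLANK_BIT(pipe) (1u << (pipe))

static uint32_t de_irq_mask = ~0u; /* all sources masked at start */
static uint32_t hw_imr;            /* stands in for GEN8_DE_PIPE_IMR */

static void write_imr(void) { hw_imr = de_irq_mask; /* + posting read */ }

static void enable_vblank(int pipe)
{
	de_irq_mask &= ~PIPE_VBLANK_BIT(pipe); /* unmask = enable */
	write_imr();
}

static void disable_vblank(int pipe)
{
	de_irq_mask |= PIPE_VBLANK_BIT(pipe);  /* mask = disable */
	write_imr();
}

int main(void)
{
	enable_vblank(1);
	printf("imr after enable:  0x%08x\n", hw_imr);
	disable_vblank(1);
	printf("imr after disable: 0x%08x\n", hw_imr);
	return 0;
}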
42f52ef8 KP |
2709 | /* Called from drm generic code, passed 'crtc' which |
2710 | * we use as a pipe index | |
2711 | */ | |
f71d4af4 | 2712 | static void i915_disable_vblank(struct drm_device *dev, int pipe) |
0a3e67a4 | 2713 | { |
2d1013dd | 2714 | struct drm_i915_private *dev_priv = dev->dev_private; |
e9d21d7f | 2715 | unsigned long irqflags; |
0a3e67a4 | 2716 | |
1ec14ad3 | 2717 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
f796cf8f | 2718 | i915_disable_pipestat(dev_priv, pipe, |
755e9019 ID |
2719 | PIPE_VBLANK_INTERRUPT_STATUS | |
2720 | PIPE_START_VBLANK_INTERRUPT_STATUS); | |
f796cf8f JB |
2721 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
2722 | } | |
2723 | ||
f71d4af4 | 2724 | static void ironlake_disable_vblank(struct drm_device *dev, int pipe) |
f796cf8f | 2725 | { |
2d1013dd | 2726 | struct drm_i915_private *dev_priv = dev->dev_private; |
f796cf8f | 2727 | unsigned long irqflags; |
b518421f | 2728 | uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) : |
40da17c2 | 2729 | DE_PIPE_VBLANK(pipe); |
f796cf8f JB |
2730 | |
2731 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
b518421f | 2732 | ironlake_disable_display_irq(dev_priv, bit); |
b1f14ad0 JB |
2733 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
2734 | } | |
2735 | ||
7e231dbe JB |
2736 | static void valleyview_disable_vblank(struct drm_device *dev, int pipe) |
2737 | { | |
2d1013dd | 2738 | struct drm_i915_private *dev_priv = dev->dev_private; |
7e231dbe | 2739 | unsigned long irqflags; |
7e231dbe JB |
2740 | |
2741 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
31acc7f5 | 2742 | i915_disable_pipestat(dev_priv, pipe, |
755e9019 | 2743 | PIPE_START_VBLANK_INTERRUPT_STATUS); |
7e231dbe JB |
2744 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
2745 | } | |
2746 | ||
abd58f01 BW |
2747 | static void gen8_disable_vblank(struct drm_device *dev, int pipe) |
2748 | { | |
2749 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2750 | unsigned long irqflags; | |
abd58f01 BW |
2751 | |
2752 | if (!i915_pipe_enabled(dev, pipe)) | |
2753 | return; | |
2754 | ||
2755 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
7167d7c6 DV |
2756 | dev_priv->de_irq_mask[pipe] |= GEN8_PIPE_VBLANK; |
2757 | I915_WRITE(GEN8_DE_PIPE_IMR(pipe), dev_priv->de_irq_mask[pipe]); | |
2758 | POSTING_READ(GEN8_DE_PIPE_IMR(pipe)); | |
abd58f01 BW |
2759 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
2760 | } | |
2761 | ||
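The two gen8 vblank routines above follow the driver's usual shadow-mask pattern: flip one bit in the cached de_irq_mask[] under irq_lock, write the shadow back to the IMR register, then issue a posting read so the write is flushed before the lock drops. A minimal user-space sketch of that bookkeeping, with plain variables standing in for the MMIO register and the lock:

	#include <stdint.h>
	#include <stdio.h>

	#define PIPE_VBLANK_BIT (1u << 0)	/* stand-in for GEN8_PIPE_VBLANK */

	struct pipe_irq {
		uint32_t shadow_imr;	/* cached copy of the mask register */
		uint32_t hw_imr;	/* stand-in for the real MMIO register */
	};

	/* A set bit masks (disables) the interrupt; clearing it unmasks. */
	static void vblank_irq_set(struct pipe_irq *p, int enable)
	{
		if (enable)
			p->shadow_imr &= ~PIPE_VBLANK_BIT;
		else
			p->shadow_imr |= PIPE_VBLANK_BIT;

		p->hw_imr = p->shadow_imr;	/* the I915_WRITE() step */
		(void)p->hw_imr;		/* the posting-read step */
	}

	int main(void)
	{
		struct pipe_irq p = { ~0u, ~0u };

		vblank_irq_set(&p, 1);
		printf("IMR after enable:  0x%08x\n", (unsigned)p.hw_imr);
		vblank_irq_set(&p, 0);
		printf("IMR after disable: 0x%08x\n", (unsigned)p.hw_imr);
		return 0;
	}

The shadow copy avoids a slow register read-back on every toggle; the posting read is what guarantees the unmask has reached the hardware before the lock is released.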
44cdd6d2 JH |
2762 | static struct drm_i915_gem_request * |
2763 | ring_last_request(struct intel_engine_cs *ring) | |
852835f3 | 2764 | { |
893eead0 | 2765 | return list_entry(ring->request_list.prev, |
44cdd6d2 | 2766 | struct drm_i915_gem_request, list); |
893eead0 CW |
2767 | } |
2768 | ||
9107e9d2 | 2769 | static bool |
44cdd6d2 | 2770 | ring_idle(struct intel_engine_cs *ring) |
9107e9d2 CW |
2771 | { |
2772 | return (list_empty(&ring->request_list) || | |
1b5a433a | 2773 | i915_gem_request_completed(ring_last_request(ring), false)); |
f65d9421 BG |
2774 | } |
2775 | ||
a028c4b0 DV |
2776 | static bool |
2777 | ipehr_is_semaphore_wait(struct drm_device *dev, u32 ipehr) | |
2778 | { | |
2779 | if (INTEL_INFO(dev)->gen >= 8) { | |
a6cdb93a | 2780 | return (ipehr >> 23) == 0x1c; |
a028c4b0 DV |
2781 | } else { |
2782 | ipehr &= ~MI_SEMAPHORE_SYNC_MASK; | |
2783 | return ipehr == (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | | |
2784 | MI_SEMAPHORE_REGISTER); | |
2785 | } | |
2786 | } | |
2787 | ||
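The gen8+ branch of ipehr_is_semaphore_wait matches MI_SEMAPHORE_WAIT by opcode alone: MI commands carry a zero client field in the top bits, so shifting the whole dword right by 23 leaves just the opcode, 0x1c per the check above. A small standalone sketch of that decode:

	#include <stdint.h>
	#include <stdio.h>

	/* For MI commands the bits above 28 are zero and bits 28:23 hold
	 * the opcode, so (dword >> 23) isolates the opcode field. */
	static int is_mi_semaphore_wait_gen8(uint32_t ipehr)
	{
		return (ipehr >> 23) == 0x1c;
	}

	int main(void)
	{
		uint32_t cmd = 0x1cu << 23;	/* bare MI_SEMAPHORE_WAIT header */

		printf("%d\n", is_mi_semaphore_wait_gen8(cmd));		/* 1 */
		printf("%d\n", is_mi_semaphore_wait_gen8(cmd | 0x3));	/* low length bits ignored: 1 */
		return 0;
	}

The pre-gen8 branch instead masks out the per-ring sync bits and compares the remaining MBOX | COMPARE | REGISTER pattern exactly.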
a4872ba6 | 2788 | static struct intel_engine_cs * |
a6cdb93a | 2789 | semaphore_wait_to_signaller_ring(struct intel_engine_cs *ring, u32 ipehr, u64 offset) |
921d42ea DV |
2790 | { |
2791 | struct drm_i915_private *dev_priv = ring->dev->dev_private; | |
a4872ba6 | 2792 | struct intel_engine_cs *signaller; |
921d42ea DV |
2793 | int i; |
2794 | ||
2795 | if (INTEL_INFO(dev_priv->dev)->gen >= 8) { | |
a6cdb93a RV |
2796 | for_each_ring(signaller, dev_priv, i) { |
2797 | if (ring == signaller) | |
2798 | continue; | |
2799 | ||
2800 | if (offset == signaller->semaphore.signal_ggtt[ring->id]) | |
2801 | return signaller; | |
2802 | } | |
921d42ea DV |
2803 | } else { |
2804 | u32 sync_bits = ipehr & MI_SEMAPHORE_SYNC_MASK; | |
2805 | ||
2806 | for_each_ring(signaller, dev_priv, i) { | |
2807 | if (ring == signaller)
2808 | continue; | |
2809 | ||
ebc348b2 | 2810 | if (sync_bits == signaller->semaphore.mbox.wait[ring->id]) |
921d42ea DV |
2811 | return signaller; |
2812 | } | |
2813 | } | |
2814 | ||
a6cdb93a RV |
2815 | DRM_ERROR("No signaller ring found for ring %i, ipehr 0x%08x, offset 0x%016llx\n", |
2816 | ring->id, ipehr, offset); | |
921d42ea DV |
2817 | |
2818 | return NULL; | |
2819 | } | |
2820 | ||
a4872ba6 OM |
2821 | static struct intel_engine_cs * |
2822 | semaphore_waits_for(struct intel_engine_cs *ring, u32 *seqno) | |
a24a11e6 CW |
2823 | { |
2824 | struct drm_i915_private *dev_priv = ring->dev->dev_private; | |
88fe429d | 2825 | u32 cmd, ipehr, head; |
a6cdb93a RV |
2826 | u64 offset = 0; |
2827 | int i, backwards; | |
a24a11e6 CW |
2828 | |
2829 | ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); | |
a028c4b0 | 2830 | if (!ipehr_is_semaphore_wait(ring->dev, ipehr)) |
6274f212 | 2831 | return NULL; |
a24a11e6 | 2832 | |
88fe429d DV |
2833 | /* |
2834 | * HEAD is likely pointing to the dword after the actual command, | |
2835 | * so scan backwards until we find the MBOX. But limit it to just 3 | |
a6cdb93a RV |
2836 | * or 4 dwords depending on the semaphore wait command size. |
2837 | * Note that we don't care about ACTHD here since that might | |
88fe429d DV |
2838 | * point at a batch, and semaphores are always emitted into the |
2839 | * ringbuffer itself. | |
a24a11e6 | 2840 | */ |
88fe429d | 2841 | head = I915_READ_HEAD(ring) & HEAD_ADDR; |
a6cdb93a | 2842 | backwards = (INTEL_INFO(ring->dev)->gen >= 8) ? 5 : 4; |
88fe429d | 2843 | |
a6cdb93a | 2844 | for (i = backwards; i; --i) { |
88fe429d DV |
2845 | /* |
2846 | * Be paranoid and presume the hw has gone off into the wild - | |
2847 | * our ring is smaller than what the hardware (and hence | |
2848 | * HEAD_ADDR) allows. Also handles wrap-around. | |
2849 | */ | |
ee1b1e5e | 2850 | head &= ring->buffer->size - 1; |
88fe429d DV |
2851 | |
2852 | /* This here seems to blow up */ | |
ee1b1e5e | 2853 | cmd = ioread32(ring->buffer->virtual_start + head); |
a24a11e6 CW |
2854 | if (cmd == ipehr) |
2855 | break; | |
2856 | ||
88fe429d DV |
2857 | head -= 4; |
2858 | } | |
a24a11e6 | 2859 | |
88fe429d DV |
2860 | if (!i) |
2861 | return NULL; | |
a24a11e6 | 2862 | |
ee1b1e5e | 2863 | *seqno = ioread32(ring->buffer->virtual_start + head + 4) + 1; |
a6cdb93a RV |
2864 | if (INTEL_INFO(ring->dev)->gen >= 8) { |
2865 | offset = ioread32(ring->buffer->virtual_start + head + 12); | |
2866 | offset <<= 32; | |
2867 | offset |= ioread32(ring->buffer->virtual_start + head + 8); |
2868 | } | |
2869 | return semaphore_wait_to_signaller_ring(ring, ipehr, offset); | |
a24a11e6 CW |
2870 | } |
2871 | ||
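As the "Be paranoid" comment notes, the backwards scan leans on the ring size being a power of two: head &= ring->buffer->size - 1 both clamps a wild HEAD_ADDR into range and lets the walk wrap from offset 0 back to the top of the buffer. A user-space sketch of just that arithmetic, assuming a 4 KiB ring for illustration:

	#include <stdint.h>
	#include <stdio.h>

	#define RING_SIZE 4096u		/* must be a power of two */

	int main(void)
	{
		uint32_t head = 4;	/* one dword into the ring */
		int i;

		for (i = 0; i < 4; i++) {
			head &= RING_SIZE - 1;	/* clamp + wrap-around */
			printf("probe offset: %u\n", (unsigned)head);
			head -= 4;		/* step back one dword */
		}
		/* The unsigned underflow at 0 is harmless: 0 - 4 wraps to
		 * 0xfffffffc, and the next mask yields RING_SIZE - 4, the
		 * last dword of the ring. */
		return 0;
	}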
a4872ba6 | 2872 | static int semaphore_passed(struct intel_engine_cs *ring) |
6274f212 CW |
2873 | { |
2874 | struct drm_i915_private *dev_priv = ring->dev->dev_private; | |
a4872ba6 | 2875 | struct intel_engine_cs *signaller; |
a0d036b0 | 2876 | u32 seqno; |
6274f212 | 2877 | |
4be17381 | 2878 | ring->hangcheck.deadlock++; |
6274f212 CW |
2879 | |
2880 | signaller = semaphore_waits_for(ring, &seqno); | |
4be17381 CW |
2881 | if (signaller == NULL) |
2882 | return -1; | |
2883 | ||
2884 | /* Prevent pathological recursion due to driver bugs */ | |
2885 | if (signaller->hangcheck.deadlock >= I915_NUM_RINGS) | |
6274f212 CW |
2886 | return -1; |
2887 | ||
4be17381 CW |
2888 | if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno)) |
2889 | return 1; | |
2890 | ||
a0d036b0 CW |
2891 | /* cursory check for an unkickable deadlock */ |
2892 | if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE && | |
2893 | semaphore_passed(signaller) < 0) | |
4be17381 CW |
2894 | return -1; |
2895 | ||
2896 | return 0; | |
6274f212 CW |
2897 | } |
2898 | ||
2899 | static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv) | |
2900 | { | |
a4872ba6 | 2901 | struct intel_engine_cs *ring; |
6274f212 CW |
2902 | int i; |
2903 | ||
2904 | for_each_ring(ring, dev_priv, i) | |
4be17381 | 2905 | ring->hangcheck.deadlock = 0; |
6274f212 CW |
2906 | } |
2907 | ||
ad8beaea | 2908 | static enum intel_ring_hangcheck_action |
a4872ba6 | 2909 | ring_stuck(struct intel_engine_cs *ring, u64 acthd) |
1ec14ad3 CW |
2910 | { |
2911 | struct drm_device *dev = ring->dev; | |
2912 | struct drm_i915_private *dev_priv = dev->dev_private; | |
9107e9d2 CW |
2913 | u32 tmp; |
2914 | ||
f260fe7b MK |
2915 | if (acthd != ring->hangcheck.acthd) { |
2916 | if (acthd > ring->hangcheck.max_acthd) { | |
2917 | ring->hangcheck.max_acthd = acthd; | |
2918 | return HANGCHECK_ACTIVE; | |
2919 | } | |
2920 | ||
2921 | return HANGCHECK_ACTIVE_LOOP; | |
2922 | } | |
6274f212 | 2923 | |
9107e9d2 | 2924 | if (IS_GEN2(dev)) |
f2f4d82f | 2925 | return HANGCHECK_HUNG; |
9107e9d2 CW |
2926 | |
2927 | /* Is the chip hanging on a WAIT_FOR_EVENT? | |
2928 | * If so we can simply poke the RB_WAIT bit | |
2929 | * and break the hang. This should work on | |
2930 | * all but the second generation chipsets. | |
2931 | */ | |
2932 | tmp = I915_READ_CTL(ring); | |
1ec14ad3 | 2933 | if (tmp & RING_WAIT) { |
58174462 MK |
2934 | i915_handle_error(dev, false, |
2935 | "Kicking stuck wait on %s", | |
2936 | ring->name); | |
1ec14ad3 | 2937 | I915_WRITE_CTL(ring, tmp); |
f2f4d82f | 2938 | return HANGCHECK_KICK; |
6274f212 CW |
2939 | } |
2940 | ||
2941 | if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) { | |
2942 | switch (semaphore_passed(ring)) { | |
2943 | default: | |
f2f4d82f | 2944 | return HANGCHECK_HUNG; |
6274f212 | 2945 | case 1: |
58174462 MK |
2946 | i915_handle_error(dev, false, |
2947 | "Kicking stuck semaphore on %s", | |
2948 | ring->name); | |
6274f212 | 2949 | I915_WRITE_CTL(ring, tmp); |
f2f4d82f | 2950 | return HANGCHECK_KICK; |
6274f212 | 2951 | case 0: |
f2f4d82f | 2952 | return HANGCHECK_WAIT; |
6274f212 | 2953 | } |
9107e9d2 | 2954 | } |
ed5cbb03 | 2955 | |
f2f4d82f | 2956 | return HANGCHECK_HUNG; |
ed5cbb03 MK |
2957 | } |
2958 | ||
f65d9421 BG |
2959 | /** |
2960 | * This is called when the chip hasn't reported back with completed | |
05407ff8 MK |
2961 | * batchbuffers in a long time. We keep track of per-ring seqno progress and |
2962 | * if there is no progress, the hangcheck score for that ring is increased. |
2963 | * Further, acthd is inspected to see if the ring is stuck. In the stuck case |
2964 | * we kick the ring. If we see no progress on three subsequent calls |
2965 | * we assume the chip is wedged and try to fix it by resetting the chip. |
f65d9421 | 2966 | */ |
a658b5d2 | 2967 | static void i915_hangcheck_elapsed(unsigned long data) |
f65d9421 BG |
2968 | { |
2969 | struct drm_device *dev = (struct drm_device *)data; | |
2d1013dd | 2970 | struct drm_i915_private *dev_priv = dev->dev_private; |
a4872ba6 | 2971 | struct intel_engine_cs *ring; |
b4519513 | 2972 | int i; |
05407ff8 | 2973 | int busy_count = 0, rings_hung = 0; |
9107e9d2 CW |
2974 | bool stuck[I915_NUM_RINGS] = { 0 }; |
2975 | #define BUSY 1 | |
2976 | #define KICK 5 | |
2977 | #define HUNG 20 | |
893eead0 | 2978 | |
d330a953 | 2979 | if (!i915.enable_hangcheck) |
3e0dc6b0 BW |
2980 | return; |
2981 | ||
b4519513 | 2982 | for_each_ring(ring, dev_priv, i) { |
50877445 CW |
2983 | u64 acthd; |
2984 | u32 seqno; | |
9107e9d2 | 2985 | bool busy = true; |
05407ff8 | 2986 | |
6274f212 CW |
2987 | semaphore_clear_deadlocks(dev_priv); |
2988 | ||
05407ff8 MK |
2989 | seqno = ring->get_seqno(ring, false); |
2990 | acthd = intel_ring_get_active_head(ring); | |
b4519513 | 2991 | |
9107e9d2 | 2992 | if (ring->hangcheck.seqno == seqno) { |
44cdd6d2 | 2993 | if (ring_idle(ring)) { |
da661464 MK |
2994 | ring->hangcheck.action = HANGCHECK_IDLE; |
2995 | ||
9107e9d2 CW |
2996 | if (waitqueue_active(&ring->irq_queue)) { |
2997 | /* Issue a wake-up to catch stuck h/w. */ | |
094f9a54 | 2998 | if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) { |
f4adcd24 DV |
2999 | if (!(dev_priv->gpu_error.test_irq_rings & intel_ring_flag(ring))) |
3000 | DRM_ERROR("Hangcheck timer elapsed... %s idle\n", | |
3001 | ring->name); | |
3002 | else | |
3003 | DRM_INFO("Fake missed irq on %s\n", | |
3004 | ring->name); | |
094f9a54 CW |
3005 | wake_up_all(&ring->irq_queue); |
3006 | } | |
3007 | /* Safeguard against driver failure */ | |
3008 | ring->hangcheck.score += BUSY; | |
9107e9d2 CW |
3009 | } else |
3010 | busy = false; | |
05407ff8 | 3011 | } else { |
6274f212 CW |
3012 | /* We always increment the hangcheck score |
3013 | * if the ring is busy and still processing | |
3014 | * the same request, so that no single request | |
3015 | * can run indefinitely (such as a chain of | |
3016 | * batches). The only time we do not increment | |
3017 | * the hangcheck score on this ring is if this |
3018 | * ring is in a legitimate wait for another | |
3019 | * ring. In that case the waiting ring is a | |
3020 | * victim and we want to be sure we catch the | |
3021 | * right culprit. Then every time we do kick | |
3022 | * the ring, add a small increment to the | |
3023 | * score so that we can catch a batch that is | |
3024 | * being repeatedly kicked and so responsible | |
3025 | * for stalling the machine. | |
3026 | */ | |
ad8beaea MK |
3027 | ring->hangcheck.action = ring_stuck(ring, |
3028 | acthd); | |
3029 | ||
3030 | switch (ring->hangcheck.action) { | |
da661464 | 3031 | case HANGCHECK_IDLE: |
f2f4d82f | 3032 | case HANGCHECK_WAIT: |
f2f4d82f | 3033 | case HANGCHECK_ACTIVE: |
f260fe7b MK |
3034 | break; |
3035 | case HANGCHECK_ACTIVE_LOOP: | |
ea04cb31 | 3036 | ring->hangcheck.score += BUSY; |
6274f212 | 3037 | break; |
f2f4d82f | 3038 | case HANGCHECK_KICK: |
ea04cb31 | 3039 | ring->hangcheck.score += KICK; |
6274f212 | 3040 | break; |
f2f4d82f | 3041 | case HANGCHECK_HUNG: |
ea04cb31 | 3042 | ring->hangcheck.score += HUNG; |
6274f212 CW |
3043 | stuck[i] = true; |
3044 | break; | |
3045 | } | |
05407ff8 | 3046 | } |
9107e9d2 | 3047 | } else { |
da661464 MK |
3048 | ring->hangcheck.action = HANGCHECK_ACTIVE; |
3049 | ||
9107e9d2 CW |
3050 | /* Gradually reduce the count so that we catch DoS |
3051 | * attempts across multiple batches. | |
3052 | */ | |
3053 | if (ring->hangcheck.score > 0) | |
3054 | ring->hangcheck.score--; | |
f260fe7b MK |
3055 | |
3056 | ring->hangcheck.acthd = ring->hangcheck.max_acthd = 0; | |
d1e61e7f CW |
3057 | } |
3058 | ||
05407ff8 MK |
3059 | ring->hangcheck.seqno = seqno; |
3060 | ring->hangcheck.acthd = acthd; | |
9107e9d2 | 3061 | busy_count += busy; |
893eead0 | 3062 | } |
b9201c14 | 3063 | |
92cab734 | 3064 | for_each_ring(ring, dev_priv, i) { |
b6b0fac0 | 3065 | if (ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG) { |
b8d88d1d DV |
3066 | DRM_INFO("%s on %s\n", |
3067 | stuck[i] ? "stuck" : "no progress", | |
3068 | ring->name); | |
a43adf07 | 3069 | rings_hung++; |
92cab734 MK |
3070 | } |
3071 | } | |
3072 | ||
05407ff8 | 3073 | if (rings_hung) |
58174462 | 3074 | return i915_handle_error(dev, true, "Ring hung"); |
f65d9421 | 3075 | |
05407ff8 MK |
3076 | if (busy_count) |
3077 | /* Reset the timer in case the chip hangs without another |
3078 | * request being added */ |
10cd45b6 MK |
3079 | i915_queue_hangcheck(dev); |
3080 | } | |
3081 | ||
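The scoring in the function above is easiest to read as a leaky counter: +BUSY (1) while busy on the same seqno, +KICK (5) per kick, +HUNG (20) when stuck, and -1 whenever the seqno advances, with a hang declared once the score reaches HANGCHECK_SCORE_RING_HUNG. A toy simulation of that accounting; the threshold value of 31 is an assumption taken from the driver headers of this era, since the constant is defined outside this file:

	#include <stdio.h>

	#define BUSY 1
	#define KICK 5
	#define HUNG 20
	#define SCORE_RING_HUNG 31	/* assumed HANGCHECK_SCORE_RING_HUNG */

	int main(void)
	{
		int score = 0, tick;

		/* A ring that ring_stuck() reports as HUNG on every
		 * hangcheck tick: two HUNG ticks (or about seven KICK
		 * ticks) are enough to cross the threshold. */
		for (tick = 1; score < SCORE_RING_HUNG; tick++) {
			score += HUNG;
			printf("tick %d: score %d\n", tick, score);
		}
		printf("hang declared after %d ticks\n", tick - 1);
		return 0;
	}

The gradual -1 decay is what distinguishes a long-but-progressing workload from a genuinely wedged one across multiple hangcheck periods.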
3082 | void i915_queue_hangcheck(struct drm_device *dev) | |
3083 | { | |
3084 | struct drm_i915_private *dev_priv = dev->dev_private; | |
672e7b7c CW |
3085 | struct timer_list *timer = &dev_priv->gpu_error.hangcheck_timer; |
3086 | ||
d330a953 | 3087 | if (!i915.enable_hangcheck) |
10cd45b6 MK |
3088 | return; |
3089 | ||
672e7b7c | 3090 | /* Don't continually defer the hangcheck, but make sure it is active */ |
d9e600b2 CW |
3091 | if (timer_pending(timer)) |
3092 | return; | |
3093 | mod_timer(timer, | |
3094 | round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); | |
f65d9421 BG |
3095 | } |
3096 | ||
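i915_queue_hangcheck makes a deliberate choice: an already-pending timer is left alone, because re-arming on every request would keep pushing the deadline out and the check would starve under a steady stream of work. A toy model of that guard, with a plain struct standing in for the kernel timer:

	#include <stdio.h>

	struct check_timer {
		long expires;
		int pending;
	};

	static void queue_check(struct check_timer *t, long now, long period)
	{
		if (t->pending)
			return;		/* don't defer an armed deadline */
		t->expires = now + period;
		t->pending = 1;
	}

	int main(void)
	{
		struct check_timer t = { 0, 0 };
		long now;

		for (now = 0; now < 5; now++)
			queue_check(&t, now, 10);	/* repeated requests... */
		printf("check still fires at t=%ld\n", t.expires);	/* 10, not 14 */
		return 0;
	}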
1c69eb42 | 3097 | static void ibx_irq_reset(struct drm_device *dev) |
91738a95 PZ |
3098 | { |
3099 | struct drm_i915_private *dev_priv = dev->dev_private; | |
3100 | ||
3101 | if (HAS_PCH_NOP(dev)) | |
3102 | return; | |
3103 | ||
f86f3fb0 | 3104 | GEN5_IRQ_RESET(SDE); |
105b122e PZ |
3105 | |
3106 | if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev)) | |
3107 | I915_WRITE(SERR_INT, 0xffffffff); | |
622364b6 | 3108 | } |
105b122e | 3109 | |
622364b6 PZ |
3110 | /* |
3111 | * SDEIER is also touched by the interrupt handler to work around missed PCH | |
3112 | * interrupts. Hence we can't update it after the interrupt handler is enabled - | |
3113 | * instead we unconditionally enable all PCH interrupt sources here, but then | |
3114 | * only unmask them as needed with SDEIMR. | |
3115 | * | |
3116 | * This function needs to be called before interrupts are enabled. | |
3117 | */ | |
3118 | static void ibx_irq_pre_postinstall(struct drm_device *dev) | |
3119 | { | |
3120 | struct drm_i915_private *dev_priv = dev->dev_private; | |
3121 | ||
3122 | if (HAS_PCH_NOP(dev)) | |
3123 | return; | |
3124 | ||
3125 | WARN_ON(I915_READ(SDEIER) != 0); | |
91738a95 PZ |
3126 | I915_WRITE(SDEIER, 0xffffffff); |
3127 | POSTING_READ(SDEIER); | |
3128 | } | |
3129 | ||
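The comment above describes the PCH policy this function implements: since the interrupt handler itself toggles SDEIER to work around missed interrupts, every source is enabled once in IER before the handler can run, and all runtime control afterwards happens purely through IMR. A generic sketch of that split, with plain variables standing in for the SDEIER/SDEIMR registers:

	#include <stdint.h>
	#include <stdio.h>

	struct sde_regs {
		uint32_t ier;	/* which sources may raise interrupts at all */
		uint32_t imr;	/* which enabled sources are currently masked */
	};

	static void pre_install(struct sde_regs *r)
	{
		r->ier = 0xffffffff;	/* enable every source once, up front */
		r->imr = 0xffffffff;	/* ...but keep them all masked */
	}

	static void unmask(struct sde_regs *r, uint32_t bits)
	{
		r->imr &= ~bits;	/* runtime control via IMR only */
	}

	int main(void)
	{
		struct sde_regs r;

		pre_install(&r);
		unmask(&r, 1u << 3);	/* e.g. one hotplug source */
		printf("IER 0x%08x IMR 0x%08x\n", (unsigned)r.ier, (unsigned)r.imr);
		return 0;
	}

Keeping IER constant means the handler's SDEIER save/restore dance can never race with a concurrent enable path.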
7c4d664e | 3130 | static void gen5_gt_irq_reset(struct drm_device *dev) |
d18ea1b5 DV |
3131 | { |
3132 | struct drm_i915_private *dev_priv = dev->dev_private; | |
3133 | ||
f86f3fb0 | 3134 | GEN5_IRQ_RESET(GT); |
a9d356a6 | 3135 | if (INTEL_INFO(dev)->gen >= 6) |
f86f3fb0 | 3136 | GEN5_IRQ_RESET(GEN6_PM); |
d18ea1b5 DV |
3137 | } |
3138 | ||
1da177e4 LT |
3139 | /* drm_dma.h hooks |
3140 | */ | |
be30b29f | 3141 | static void ironlake_irq_reset(struct drm_device *dev) |
036a4a7d | 3142 | { |
2d1013dd | 3143 | struct drm_i915_private *dev_priv = dev->dev_private; |
036a4a7d | 3144 | |
0c841212 | 3145 | I915_WRITE(HWSTAM, 0xffffffff); |
bdfcdb63 | 3146 | |
f86f3fb0 | 3147 | GEN5_IRQ_RESET(DE); |
c6d954c1 PZ |
3148 | if (IS_GEN7(dev)) |
3149 | I915_WRITE(GEN7_ERR_INT, 0xffffffff); | |
036a4a7d | 3150 | |
7c4d664e | 3151 | gen5_gt_irq_reset(dev); |
c650156a | 3152 | |
1c69eb42 | 3153 | ibx_irq_reset(dev); |
7d99163d | 3154 | } |
c650156a | 3155 | |
70591a41 VS |
3156 | static void vlv_display_irq_reset(struct drm_i915_private *dev_priv) |
3157 | { | |
3158 | enum pipe pipe; | |
3159 | ||
3160 | I915_WRITE(PORT_HOTPLUG_EN, 0); | |
3161 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); | |
3162 | ||
3163 | for_each_pipe(dev_priv, pipe) | |
3164 | I915_WRITE(PIPESTAT(pipe), 0xffff); | |
3165 | ||
3166 | GEN5_IRQ_RESET(VLV_); | |
3167 | } | |
3168 | ||
7e231dbe JB |
3169 | static void valleyview_irq_preinstall(struct drm_device *dev) |
3170 | { | |
2d1013dd | 3171 | struct drm_i915_private *dev_priv = dev->dev_private; |
7e231dbe | 3172 | |
7e231dbe JB |
3173 | /* VLV magic */ |
3174 | I915_WRITE(VLV_IMR, 0); | |
3175 | I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); | |
3176 | I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); | |
3177 | I915_WRITE(RING_IMR(BLT_RING_BASE), 0); | |
3178 | ||
7c4d664e | 3179 | gen5_gt_irq_reset(dev); |
7e231dbe | 3180 | |
7c4cde39 | 3181 | I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); |
7e231dbe | 3182 | |
70591a41 | 3183 | vlv_display_irq_reset(dev_priv); |
7e231dbe JB |
3184 | } |
3185 | ||
d6e3cca3 DV |
3186 | static void gen8_gt_irq_reset(struct drm_i915_private *dev_priv) |
3187 | { | |
3188 | GEN8_IRQ_RESET_NDX(GT, 0); | |
3189 | GEN8_IRQ_RESET_NDX(GT, 1); | |
3190 | GEN8_IRQ_RESET_NDX(GT, 2); | |
3191 | GEN8_IRQ_RESET_NDX(GT, 3); | |
3192 | } | |
3193 | ||
823f6b38 | 3194 | static void gen8_irq_reset(struct drm_device *dev) |
abd58f01 BW |
3195 | { |
3196 | struct drm_i915_private *dev_priv = dev->dev_private; | |
3197 | int pipe; | |
3198 | ||
abd58f01 BW |
3199 | I915_WRITE(GEN8_MASTER_IRQ, 0); |
3200 | POSTING_READ(GEN8_MASTER_IRQ); | |
3201 | ||
d6e3cca3 | 3202 | gen8_gt_irq_reset(dev_priv); |
abd58f01 | 3203 | |
055e393f | 3204 | for_each_pipe(dev_priv, pipe) |
f458ebbc DV |
3205 | if (intel_display_power_is_enabled(dev_priv, |
3206 | POWER_DOMAIN_PIPE(pipe))) | |
813bde43 | 3207 | GEN8_IRQ_RESET_NDX(DE_PIPE, pipe); |
abd58f01 | 3208 | |
f86f3fb0 PZ |
3209 | GEN5_IRQ_RESET(GEN8_DE_PORT_); |
3210 | GEN5_IRQ_RESET(GEN8_DE_MISC_); | |
3211 | GEN5_IRQ_RESET(GEN8_PCU_); | |
abd58f01 | 3212 | |
1c69eb42 | 3213 | ibx_irq_reset(dev); |
abd58f01 | 3214 | } |
09f2344d | 3215 | |
d49bdb0e PZ |
3216 | void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv) |
3217 | { | |
1180e206 | 3218 | uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; |
d49bdb0e | 3219 | |
13321786 | 3220 | spin_lock_irq(&dev_priv->irq_lock); |
d49bdb0e | 3221 | GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B], |
1180e206 | 3222 | ~dev_priv->de_irq_mask[PIPE_B] | extra_ier); |
d49bdb0e | 3223 | GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C], |
1180e206 | 3224 | ~dev_priv->de_irq_mask[PIPE_C] | extra_ier); |
13321786 | 3225 | spin_unlock_irq(&dev_priv->irq_lock); |
d49bdb0e PZ |
3226 | } |
3227 | ||
43f328d7 VS |
3228 | static void cherryview_irq_preinstall(struct drm_device *dev) |
3229 | { | |
3230 | struct drm_i915_private *dev_priv = dev->dev_private; | |
43f328d7 VS |
3231 | |
3232 | I915_WRITE(GEN8_MASTER_IRQ, 0); | |
3233 | POSTING_READ(GEN8_MASTER_IRQ); | |
3234 | ||
d6e3cca3 | 3235 | gen8_gt_irq_reset(dev_priv); |
43f328d7 VS |
3236 | |
3237 | GEN5_IRQ_RESET(GEN8_PCU_); | |
3238 | ||
43f328d7 VS |
3239 | I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK_CHV); |
3240 | ||
70591a41 | 3241 | vlv_display_irq_reset(dev_priv); |
43f328d7 VS |
3242 | } |
3243 | ||
82a28bcf | 3244 | static void ibx_hpd_irq_setup(struct drm_device *dev) |
7fe0b973 | 3245 | { |
2d1013dd | 3246 | struct drm_i915_private *dev_priv = dev->dev_private; |
82a28bcf | 3247 | struct intel_encoder *intel_encoder; |
fee884ed | 3248 | u32 hotplug_irqs, hotplug, enabled_irqs = 0; |
82a28bcf DV |
3249 | |
3250 | if (HAS_PCH_IBX(dev)) { | |
fee884ed | 3251 | hotplug_irqs = SDE_HOTPLUG_MASK; |
b2784e15 | 3252 | for_each_intel_encoder(dev, intel_encoder) |
cd569aed | 3253 | if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) |
fee884ed | 3254 | enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin]; |
82a28bcf | 3255 | } else { |
fee884ed | 3256 | hotplug_irqs = SDE_HOTPLUG_MASK_CPT; |
b2784e15 | 3257 | for_each_intel_encoder(dev, intel_encoder) |
cd569aed | 3258 | if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) |
fee884ed | 3259 | enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin]; |
82a28bcf | 3260 | } |
7fe0b973 | 3261 | |
fee884ed | 3262 | ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); |
82a28bcf DV |
3263 | |
3264 | /* | |
3265 | * Enable digital hotplug on the PCH, and configure the DP short pulse | |
3266 | * duration to 2ms (which is the minimum in the Display Port spec) | |
3267 | * | |
3268 | * This register is the same on all known PCH chips. | |
3269 | */ | |
7fe0b973 KP |
3270 | hotplug = I915_READ(PCH_PORT_HOTPLUG); |
3271 | hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); | |
3272 | hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; | |
3273 | hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; | |
3274 | hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; | |
3275 | I915_WRITE(PCH_PORT_HOTPLUG, hotplug); | |
3276 | } | |
3277 | ||
d46da437 PZ |
3278 | static void ibx_irq_postinstall(struct drm_device *dev) |
3279 | { | |
2d1013dd | 3280 | struct drm_i915_private *dev_priv = dev->dev_private; |
82a28bcf | 3281 | u32 mask; |
e5868a31 | 3282 | |
692a04cf DV |
3283 | if (HAS_PCH_NOP(dev)) |
3284 | return; | |
3285 | ||
105b122e | 3286 | if (HAS_PCH_IBX(dev)) |
5c673b60 | 3287 | mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON; |
105b122e | 3288 | else |
5c673b60 | 3289 | mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT; |
8664281b | 3290 | |
337ba017 | 3291 | GEN5_ASSERT_IIR_IS_ZERO(SDEIIR); |
d46da437 | 3292 | I915_WRITE(SDEIMR, ~mask); |
d46da437 PZ |
3293 | } |
3294 | ||
0a9a8c91 DV |
3295 | static void gen5_gt_irq_postinstall(struct drm_device *dev) |
3296 | { | |
3297 | struct drm_i915_private *dev_priv = dev->dev_private; | |
3298 | u32 pm_irqs, gt_irqs; | |
3299 | ||
3300 | pm_irqs = gt_irqs = 0; | |
3301 | ||
3302 | dev_priv->gt_irq_mask = ~0; | |
040d2baa | 3303 | if (HAS_L3_DPF(dev)) { |
0a9a8c91 | 3304 | /* L3 parity interrupt is always unmasked. */ |
35a85ac6 BW |
3305 | dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev); |
3306 | gt_irqs |= GT_PARITY_ERROR(dev); | |
0a9a8c91 DV |
3307 | } |
3308 | ||
3309 | gt_irqs |= GT_RENDER_USER_INTERRUPT; | |
3310 | if (IS_GEN5(dev)) { | |
3311 | gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT | | |
3312 | ILK_BSD_USER_INTERRUPT; | |
3313 | } else { | |
3314 | gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; | |
3315 | } | |
3316 | ||
35079899 | 3317 | GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs); |
0a9a8c91 DV |
3318 | |
3319 | if (INTEL_INFO(dev)->gen >= 6) { | |
78e68d36 ID |
3320 | /* |
3321 | * RPS interrupts will get enabled/disabled on demand when RPS | |
3322 | * itself is enabled/disabled. | |
3323 | */ | |
0a9a8c91 DV |
3324 | if (HAS_VEBOX(dev)) |
3325 | pm_irqs |= PM_VEBOX_USER_INTERRUPT; | |
3326 | ||
605cd25b | 3327 | dev_priv->pm_irq_mask = 0xffffffff; |
35079899 | 3328 | GEN5_IRQ_INIT(GEN6_PM, dev_priv->pm_irq_mask, pm_irqs); |
0a9a8c91 DV |
3329 | } |
3330 | } | |
3331 | ||
f71d4af4 | 3332 | static int ironlake_irq_postinstall(struct drm_device *dev) |
036a4a7d | 3333 | { |
2d1013dd | 3334 | struct drm_i915_private *dev_priv = dev->dev_private; |
8e76f8dc PZ |
3335 | u32 display_mask, extra_mask; |
3336 | ||
3337 | if (INTEL_INFO(dev)->gen >= 7) { | |
3338 | display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | | |
3339 | DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | | |
3340 | DE_PLANEB_FLIP_DONE_IVB | | |
5c673b60 | 3341 | DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB); |
8e76f8dc | 3342 | extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | |
5c673b60 | 3343 | DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB); |
8e76f8dc PZ |
3344 | } else { |
3345 | display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | | |
3346 | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | | |
5b3a856b | 3347 | DE_AUX_CHANNEL_A | |
5b3a856b DV |
3348 | DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE | |
3349 | DE_POISON); | |
5c673b60 DV |
3350 | extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT | |
3351 | DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN; | |
8e76f8dc | 3352 | } |
036a4a7d | 3353 | |
1ec14ad3 | 3354 | dev_priv->irq_mask = ~display_mask; |
036a4a7d | 3355 | |
0c841212 PZ |
3356 | I915_WRITE(HWSTAM, 0xeffe); |
3357 | ||
622364b6 PZ |
3358 | ibx_irq_pre_postinstall(dev); |
3359 | ||
35079899 | 3360 | GEN5_IRQ_INIT(DE, dev_priv->irq_mask, display_mask | extra_mask); |
036a4a7d | 3361 | |
0a9a8c91 | 3362 | gen5_gt_irq_postinstall(dev); |
036a4a7d | 3363 | |
d46da437 | 3364 | ibx_irq_postinstall(dev); |
7fe0b973 | 3365 | |
f97108d1 | 3366 | if (IS_IRONLAKE_M(dev)) { |
6005ce42 DV |
3367 | /* Enable PCU event interrupts |
3368 | * | |
3369 | * spinlocking not required here for correctness since interrupt | |
4bc9d430 DV |
3370 | * setup is guaranteed to run in single-threaded context. But we |
3371 | * need it to make the assert_spin_locked happy. */ | |
d6207435 | 3372 | spin_lock_irq(&dev_priv->irq_lock); |
f97108d1 | 3373 | ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); |
d6207435 | 3374 | spin_unlock_irq(&dev_priv->irq_lock); |
f97108d1 JB |
3375 | } |
3376 | ||
036a4a7d ZW |
3377 | return 0; |
3378 | } | |
3379 | ||
f8b79e58 ID |
3380 | static void valleyview_display_irqs_install(struct drm_i915_private *dev_priv) |
3381 | { | |
3382 | u32 pipestat_mask; | |
3383 | u32 iir_mask; | |
120dda4f | 3384 | enum pipe pipe; |
f8b79e58 ID |
3385 | |
3386 | pipestat_mask = PIPESTAT_INT_STATUS_MASK | | |
3387 | PIPE_FIFO_UNDERRUN_STATUS; | |
3388 | ||
120dda4f VS |
3389 | for_each_pipe(dev_priv, pipe) |
3390 | I915_WRITE(PIPESTAT(pipe), pipestat_mask); | |
f8b79e58 ID |
3391 | POSTING_READ(PIPESTAT(PIPE_A)); |
3392 | ||
3393 | pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | | |
3394 | PIPE_CRC_DONE_INTERRUPT_STATUS; | |
3395 | ||
120dda4f VS |
3396 | i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); |
3397 | for_each_pipe(dev_priv, pipe) | |
3398 | i915_enable_pipestat(dev_priv, pipe, pipestat_mask); | |
f8b79e58 ID |
3399 | |
3400 | iir_mask = I915_DISPLAY_PORT_INTERRUPT | | |
3401 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | | |
3402 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; | |
120dda4f VS |
3403 | if (IS_CHERRYVIEW(dev_priv)) |
3404 | iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; | |
f8b79e58 ID |
3405 | dev_priv->irq_mask &= ~iir_mask; |
3406 | ||
3407 | I915_WRITE(VLV_IIR, iir_mask); | |
3408 | I915_WRITE(VLV_IIR, iir_mask); | |
f8b79e58 | 3409 | I915_WRITE(VLV_IER, ~dev_priv->irq_mask); |
76e41860 VS |
3410 | I915_WRITE(VLV_IMR, dev_priv->irq_mask); |
3411 | POSTING_READ(VLV_IMR); | |
f8b79e58 ID |
3412 | } |
3413 | ||
3414 | static void valleyview_display_irqs_uninstall(struct drm_i915_private *dev_priv) | |
3415 | { | |
3416 | u32 pipestat_mask; | |
3417 | u32 iir_mask; | |
120dda4f | 3418 | enum pipe pipe; |
f8b79e58 ID |
3419 | |
3420 | iir_mask = I915_DISPLAY_PORT_INTERRUPT | | |
3421 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | | |
6c7fba04 | 3422 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT; |
120dda4f VS |
3423 | if (IS_CHERRYVIEW(dev_priv)) |
3424 | iir_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT; | |
f8b79e58 ID |
3425 | |
3426 | dev_priv->irq_mask |= iir_mask; | |
f8b79e58 | 3427 | I915_WRITE(VLV_IMR, dev_priv->irq_mask); |
76e41860 | 3428 | I915_WRITE(VLV_IER, ~dev_priv->irq_mask); |
f8b79e58 ID |
3429 | I915_WRITE(VLV_IIR, iir_mask); |
3430 | I915_WRITE(VLV_IIR, iir_mask); | |
3431 | POSTING_READ(VLV_IIR); | |
3432 | ||
3433 | pipestat_mask = PLANE_FLIP_DONE_INT_STATUS_VLV | | |
3434 | PIPE_CRC_DONE_INTERRUPT_STATUS; | |
3435 | ||
120dda4f VS |
3436 | i915_disable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); |
3437 | for_each_pipe(dev_priv, pipe) | |
3438 | i915_disable_pipestat(dev_priv, pipe, pipestat_mask); | |
f8b79e58 ID |
3439 | |
3440 | pipestat_mask = PIPESTAT_INT_STATUS_MASK | | |
3441 | PIPE_FIFO_UNDERRUN_STATUS; | |
120dda4f VS |
3442 | |
3443 | for_each_pipe(dev_priv, pipe) | |
3444 | I915_WRITE(PIPESTAT(pipe), pipestat_mask); | |
f8b79e58 ID |
3445 | POSTING_READ(PIPESTAT(PIPE_A)); |
3446 | } | |
3447 | ||
3448 | void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv) | |
3449 | { | |
3450 | assert_spin_locked(&dev_priv->irq_lock); | |
3451 | ||
3452 | if (dev_priv->display_irqs_enabled) | |
3453 | return; | |
3454 | ||
3455 | dev_priv->display_irqs_enabled = true; | |
3456 | ||
950eabaf | 3457 | if (intel_irqs_enabled(dev_priv)) |
f8b79e58 ID |
3458 | valleyview_display_irqs_install(dev_priv); |
3459 | } | |
3460 | ||
3461 | void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv) | |
3462 | { | |
3463 | assert_spin_locked(&dev_priv->irq_lock); | |
3464 | ||
3465 | if (!dev_priv->display_irqs_enabled) | |
3466 | return; | |
3467 | ||
3468 | dev_priv->display_irqs_enabled = false; | |
3469 | ||
950eabaf | 3470 | if (intel_irqs_enabled(dev_priv)) |
f8b79e58 ID |
3471 | valleyview_display_irqs_uninstall(dev_priv); |
3472 | } | |
3473 | ||
0e6c9a9e | 3474 | static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv) |
7e231dbe | 3475 | { |
f8b79e58 | 3476 | dev_priv->irq_mask = ~0; |
7e231dbe | 3477 | |
20afbda2 DV |
3478 | I915_WRITE(PORT_HOTPLUG_EN, 0); |
3479 | POSTING_READ(PORT_HOTPLUG_EN); | |
3480 | ||
7e231dbe | 3481 | I915_WRITE(VLV_IIR, 0xffffffff); |
76e41860 VS |
3482 | I915_WRITE(VLV_IIR, 0xffffffff); |
3483 | I915_WRITE(VLV_IER, ~dev_priv->irq_mask); | |
3484 | I915_WRITE(VLV_IMR, dev_priv->irq_mask); | |
3485 | POSTING_READ(VLV_IMR); | |
7e231dbe | 3486 | |
b79480ba DV |
3487 | /* Interrupt setup is already guaranteed to be single-threaded, this is |
3488 | * just to make the assert_spin_locked check happy. */ | |
d6207435 | 3489 | spin_lock_irq(&dev_priv->irq_lock); |
f8b79e58 ID |
3490 | if (dev_priv->display_irqs_enabled) |
3491 | valleyview_display_irqs_install(dev_priv); | |
d6207435 | 3492 | spin_unlock_irq(&dev_priv->irq_lock); |
0e6c9a9e VS |
3493 | } |
3494 | ||
3495 | static int valleyview_irq_postinstall(struct drm_device *dev) | |
3496 | { | |
3497 | struct drm_i915_private *dev_priv = dev->dev_private; | |
3498 | ||
3499 | vlv_display_irq_postinstall(dev_priv); | |
7e231dbe | 3500 | |
0a9a8c91 | 3501 | gen5_gt_irq_postinstall(dev); |
7e231dbe JB |
3502 | |
3503 | /* ack & enable invalid PTE error interrupts */ | |
3504 | #if 0 /* FIXME: add support to irq handler for checking these bits */ | |
3505 | I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); | |
3506 | I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK); | |
3507 | #endif | |
3508 | ||
3509 | I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); | |
20afbda2 DV |
3510 | |
3511 | return 0; | |
3512 | } | |
3513 | ||
abd58f01 BW |
3514 | static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv) |
3515 | { | |
abd58f01 BW |
3516 | /* These are interrupts we'll toggle with the ring mask register */ |
3517 | uint32_t gt_interrupts[] = { | |
3518 | GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | | |
73d477f6 | 3519 | GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT | |
abd58f01 | 3520 | GT_RENDER_L3_PARITY_ERROR_INTERRUPT | |
73d477f6 OM |
3521 | GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT | |
3522 | GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT, | |
abd58f01 | 3523 | GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | |
73d477f6 OM |
3524 | GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT | |
3525 | GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT | | |
3526 | GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS2_IRQ_SHIFT, | |
abd58f01 | 3527 | 0, |
73d477f6 OM |
3528 | GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT | |
3529 | GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT | |
abd58f01 BW |
3530 | }; |
3531 | ||
0961021a | 3532 | dev_priv->pm_irq_mask = 0xffffffff; |
9a2d2d87 D |
3533 | GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]); |
3534 | GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]); | |
78e68d36 ID |
3535 | /* |
3536 | * RPS interrupts will get enabled/disabled on demand when RPS itself | |
3537 | * is enabled/disabled. | |
3538 | */ | |
3539 | GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0); | |
9a2d2d87 | 3540 | GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]); |
abd58f01 BW |
3541 | } |
3542 | ||
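The gt_interrupts[] table above packs two engines' enable bits into each 32-bit GT register, with each engine's pattern shifted into its own half by the GEN8_*_IRQ_SHIFT constants. A sketch of the packing; the bit positions and shift values here are illustrative stand-ins, not the real register layout:

	#include <stdint.h>
	#include <stdio.h>

	#define USER_INTERRUPT	(1u << 0)	/* stand-in bit positions */
	#define CTX_SWITCH	(1u << 3)
	#define RCS_SHIFT	0		/* stand-ins for GEN8_*_IRQ_SHIFT */
	#define BCS_SHIFT	16

	int main(void)
	{
		/* Two engines share one 32-bit IRQ register; each engine's
		 * enables are the same pattern shifted into its own half. */
		uint32_t gt0 = (USER_INTERRUPT | CTX_SWITCH) << RCS_SHIFT |
			       (USER_INTERRUPT | CTX_SWITCH) << BCS_SHIFT;

		printf("GT0 enables: 0x%08x\n", (unsigned)gt0);
		return 0;
	}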
3543 | static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) | |
3544 | { | |
770de83d DL |
3545 | uint32_t de_pipe_masked = GEN8_PIPE_CDCLK_CRC_DONE; |
3546 | uint32_t de_pipe_enables; | |
abd58f01 | 3547 | int pipe; |
88e04703 | 3548 | u32 aux_en = GEN8_AUX_CHANNEL_A; |
770de83d | 3549 | |
88e04703 | 3550 | if (IS_GEN9(dev_priv)) { |
770de83d DL |
3551 | de_pipe_masked |= GEN9_PIPE_PLANE1_FLIP_DONE | |
3552 | GEN9_DE_PIPE_IRQ_FAULT_ERRORS; | |
88e04703 JB |
3553 | aux_en |= GEN9_AUX_CHANNEL_B | GEN9_AUX_CHANNEL_C | |
3554 | GEN9_AUX_CHANNEL_D; | |
3555 | } else | |
770de83d DL |
3556 | de_pipe_masked |= GEN8_PIPE_PRIMARY_FLIP_DONE | |
3557 | GEN8_DE_PIPE_IRQ_FAULT_ERRORS; | |
3558 | ||
3559 | de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK | | |
3560 | GEN8_PIPE_FIFO_UNDERRUN; | |
3561 | ||
13b3a0a7 DV |
3562 | dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked; |
3563 | dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked; | |
3564 | dev_priv->de_irq_mask[PIPE_C] = ~de_pipe_masked; | |
abd58f01 | 3565 | |
055e393f | 3566 | for_each_pipe(dev_priv, pipe) |
f458ebbc | 3567 | if (intel_display_power_is_enabled(dev_priv, |
813bde43 PZ |
3568 | POWER_DOMAIN_PIPE(pipe))) |
3569 | GEN8_IRQ_INIT_NDX(DE_PIPE, pipe, | |
3570 | dev_priv->de_irq_mask[pipe], | |
3571 | de_pipe_enables); | |
abd58f01 | 3572 | |
88e04703 | 3573 | GEN5_IRQ_INIT(GEN8_DE_PORT_, ~aux_en, aux_en); |
abd58f01 BW |
3574 | } |
3575 | ||
3576 | static int gen8_irq_postinstall(struct drm_device *dev) | |
3577 | { | |
3578 | struct drm_i915_private *dev_priv = dev->dev_private; | |
3579 | ||
622364b6 PZ |
3580 | ibx_irq_pre_postinstall(dev); |
3581 | ||
abd58f01 BW |
3582 | gen8_gt_irq_postinstall(dev_priv); |
3583 | gen8_de_irq_postinstall(dev_priv); | |
3584 | ||
3585 | ibx_irq_postinstall(dev); | |
3586 | ||
3587 | I915_WRITE(GEN8_MASTER_IRQ, DE_MASTER_IRQ_CONTROL); | |
3588 | POSTING_READ(GEN8_MASTER_IRQ); | |
3589 | ||
3590 | return 0; | |
3591 | } | |
3592 | ||
43f328d7 VS |
3593 | static int cherryview_irq_postinstall(struct drm_device *dev) |
3594 | { | |
3595 | struct drm_i915_private *dev_priv = dev->dev_private; | |
43f328d7 | 3596 | |
c2b66797 | 3597 | vlv_display_irq_postinstall(dev_priv); |
43f328d7 VS |
3598 | |
3599 | gen8_gt_irq_postinstall(dev_priv); | |
3600 | ||
3601 | I915_WRITE(GEN8_MASTER_IRQ, MASTER_INTERRUPT_ENABLE); | |
3602 | POSTING_READ(GEN8_MASTER_IRQ); | |
3603 | ||
3604 | return 0; | |
3605 | } | |
3606 | ||
abd58f01 BW |
3607 | static void gen8_irq_uninstall(struct drm_device *dev) |
3608 | { | |
3609 | struct drm_i915_private *dev_priv = dev->dev_private; | |
abd58f01 BW |
3610 | |
3611 | if (!dev_priv) | |
3612 | return; | |
3613 | ||
823f6b38 | 3614 | gen8_irq_reset(dev); |
abd58f01 BW |
3615 | } |
3616 | ||
8ea0be4f VS |
3617 | static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv) |
3618 | { | |
3619 | /* Interrupt setup is already guaranteed to be single-threaded, this is | |
3620 | * just to make the assert_spin_locked check happy. */ | |
3621 | spin_lock_irq(&dev_priv->irq_lock); | |
3622 | if (dev_priv->display_irqs_enabled) | |
3623 | valleyview_display_irqs_uninstall(dev_priv); | |
3624 | spin_unlock_irq(&dev_priv->irq_lock); | |
3625 | ||
3626 | vlv_display_irq_reset(dev_priv); | |
3627 | ||
c352d1ba | 3628 | dev_priv->irq_mask = ~0; |
8ea0be4f VS |
3629 | } |
3630 | ||
7e231dbe JB |
3631 | static void valleyview_irq_uninstall(struct drm_device *dev) |
3632 | { | |
2d1013dd | 3633 | struct drm_i915_private *dev_priv = dev->dev_private; |
7e231dbe JB |
3634 | |
3635 | if (!dev_priv) | |
3636 | return; | |
3637 | ||
843d0e7d ID |
3638 | I915_WRITE(VLV_MASTER_IER, 0); |
3639 | ||
893fce8e VS |
3640 | gen5_gt_irq_reset(dev); |
3641 | ||
7e231dbe | 3642 | I915_WRITE(HWSTAM, 0xffffffff); |
f8b79e58 | 3643 | |
8ea0be4f | 3644 | vlv_display_irq_uninstall(dev_priv); |
7e231dbe JB |
3645 | } |
3646 | ||
43f328d7 VS |
3647 | static void cherryview_irq_uninstall(struct drm_device *dev) |
3648 | { | |
3649 | struct drm_i915_private *dev_priv = dev->dev_private; | |
43f328d7 VS |
3650 | |
3651 | if (!dev_priv) | |
3652 | return; | |
3653 | ||
3654 | I915_WRITE(GEN8_MASTER_IRQ, 0); | |
3655 | POSTING_READ(GEN8_MASTER_IRQ); | |
3656 | ||
a2c30fba | 3657 | gen8_gt_irq_reset(dev_priv); |
43f328d7 | 3658 | |
a2c30fba | 3659 | GEN5_IRQ_RESET(GEN8_PCU_); |
43f328d7 | 3660 | |
c2b66797 | 3661 | vlv_display_irq_uninstall(dev_priv); |
43f328d7 VS |
3662 | } |
3663 | ||
f71d4af4 | 3664 | static void ironlake_irq_uninstall(struct drm_device *dev) |
036a4a7d | 3665 | { |
2d1013dd | 3666 | struct drm_i915_private *dev_priv = dev->dev_private; |
4697995b JB |
3667 | |
3668 | if (!dev_priv) | |
3669 | return; | |
3670 | ||
be30b29f | 3671 | ironlake_irq_reset(dev); |
036a4a7d ZW |
3672 | } |
3673 | ||
a266c7d5 | 3674 | static void i8xx_irq_preinstall(struct drm_device * dev) |
1da177e4 | 3675 | { |
2d1013dd | 3676 | struct drm_i915_private *dev_priv = dev->dev_private; |
9db4a9c7 | 3677 | int pipe; |
91e3738e | 3678 | |
055e393f | 3679 | for_each_pipe(dev_priv, pipe) |
9db4a9c7 | 3680 | I915_WRITE(PIPESTAT(pipe), 0); |
a266c7d5 CW |
3681 | I915_WRITE16(IMR, 0xffff); |
3682 | I915_WRITE16(IER, 0x0); | |
3683 | POSTING_READ16(IER); | |
c2798b19 CW |
3684 | } |
3685 | ||
3686 | static int i8xx_irq_postinstall(struct drm_device *dev) | |
3687 | { | |
2d1013dd | 3688 | struct drm_i915_private *dev_priv = dev->dev_private; |
c2798b19 | 3689 | |
c2798b19 CW |
3690 | I915_WRITE16(EMR, |
3691 | ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); | |
3692 | ||
3693 | /* Unmask the interrupts that we always want on. */ | |
3694 | dev_priv->irq_mask = | |
3695 | ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | | |
3696 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | | |
3697 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | | |
3698 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | | |
3699 | I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); | |
3700 | I915_WRITE16(IMR, dev_priv->irq_mask); | |
3701 | ||
3702 | I915_WRITE16(IER, | |
3703 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | | |
3704 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | | |
3705 | I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | | |
3706 | I915_USER_INTERRUPT); | |
3707 | POSTING_READ16(IER); | |
3708 | ||
379ef82d DV |
3709 | /* Interrupt setup is already guaranteed to be single-threaded, this is |
3710 | * just to make the assert_spin_locked check happy. */ | |
d6207435 | 3711 | spin_lock_irq(&dev_priv->irq_lock); |
755e9019 ID |
3712 | i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); |
3713 | i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); | |
d6207435 | 3714 | spin_unlock_irq(&dev_priv->irq_lock); |
379ef82d | 3715 | |
c2798b19 CW |
3716 | return 0; |
3717 | } | |
3718 | ||
90a72f87 VS |
3719 | /* |
3720 | * Returns true when a page flip has completed. | |
3721 | */ | |
3722 | static bool i8xx_handle_vblank(struct drm_device *dev, | |
1f1c2e24 | 3723 | int plane, int pipe, u32 iir) |
90a72f87 | 3724 | { |
2d1013dd | 3725 | struct drm_i915_private *dev_priv = dev->dev_private; |
1f1c2e24 | 3726 | u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); |
90a72f87 | 3727 | |
8d7849db | 3728 | if (!intel_pipe_handle_vblank(dev, pipe)) |
90a72f87 VS |
3729 | return false; |
3730 | ||
3731 | if ((iir & flip_pending) == 0) | |
d6bbafa1 | 3732 | goto check_page_flip; |
90a72f87 | 3733 | |
1f1c2e24 | 3734 | intel_prepare_page_flip(dev, plane); |
90a72f87 VS |
3735 | |
3736 | /* We detect FlipDone by looking for the change in PendingFlip from '1' | |
3737 | * to '0' on the following vblank, i.e. IIR has the PendingFlip |
3738 | * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence | |
3739 | * the flip is completed (no longer pending). Since this doesn't raise | |
3740 | * an interrupt per se, we watch for the change at vblank. | |
3741 | */ | |
3742 | if (I915_READ16(ISR) & flip_pending) | |
d6bbafa1 | 3743 | goto check_page_flip; |
90a72f87 VS |
3744 | |
3745 | intel_finish_page_flip(dev, pipe); | |
90a72f87 | 3746 | return true; |
d6bbafa1 CW |
3747 | |
3748 | check_page_flip: | |
3749 | intel_check_page_flip(dev, pipe); | |
3750 | return false; | |
90a72f87 VS |
3751 | } |
3752 | ||
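The flip-done detection above is a two-register handshake: IIR latches that PendingFlip was seen, while ISR reflects its live value, and the flip is only complete once the latched bit is set but the live bit has dropped. A minimal sketch of that predicate; the register values are plain parameters here, not real MMIO reads:

	#include <stdint.h>
	#include <stdio.h>

	#define FLIP_PENDING (1u << 2)	/* stand-in for DISPLAY_PLANE_FLIP_PENDING() */

	/* Complete only when the latched (IIR) bit fired and the live
	 * (ISR) status has deasserted, i.e. the '1' -> '0' edge happened. */
	static int flip_completed(uint16_t iir, uint16_t isr)
	{
		return (iir & FLIP_PENDING) && !(isr & FLIP_PENDING);
	}

	int main(void)
	{
		printf("%d\n", flip_completed(FLIP_PENDING, FLIP_PENDING));	/* still pending: 0 */
		printf("%d\n", flip_completed(FLIP_PENDING, 0));		/* done: 1 */
		return 0;
	}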
ff1f525e | 3753 | static irqreturn_t i8xx_irq_handler(int irq, void *arg) |
c2798b19 | 3754 | { |
45a83f84 | 3755 | struct drm_device *dev = arg; |
2d1013dd | 3756 | struct drm_i915_private *dev_priv = dev->dev_private; |
c2798b19 CW |
3757 | u16 iir, new_iir; |
3758 | u32 pipe_stats[2]; | |
c2798b19 CW |
3759 | int pipe; |
3760 | u16 flip_mask = | |
3761 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | | |
3762 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; | |
3763 | ||
c2798b19 CW |
3764 | iir = I915_READ16(IIR); |
3765 | if (iir == 0) | |
3766 | return IRQ_NONE; | |
3767 | ||
3768 | while (iir & ~flip_mask) { | |
3769 | /* Can't rely on pipestat interrupt bit in iir as it might | |
3770 | * have been cleared after the pipestat interrupt was received. | |
3771 | * It doesn't set the bit in iir again, but it still produces | |
3772 | * interrupts (for non-MSI). | |
3773 | */ | |
222c7f51 | 3774 | spin_lock(&dev_priv->irq_lock); |
c2798b19 | 3775 | if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) |
aaecdf61 | 3776 | DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); |
c2798b19 | 3777 | |
055e393f | 3778 | for_each_pipe(dev_priv, pipe) { |
c2798b19 CW |
3779 | int reg = PIPESTAT(pipe); |
3780 | pipe_stats[pipe] = I915_READ(reg); | |
3781 | ||
3782 | /* | |
3783 | * Clear the PIPE*STAT regs before the IIR | |
3784 | */ | |
2d9d2b0b | 3785 | if (pipe_stats[pipe] & 0x8000ffff) |
c2798b19 | 3786 | I915_WRITE(reg, pipe_stats[pipe]); |
c2798b19 | 3787 | } |
222c7f51 | 3788 | spin_unlock(&dev_priv->irq_lock); |
c2798b19 CW |
3789 | |
3790 | I915_WRITE16(IIR, iir & ~flip_mask); | |
3791 | new_iir = I915_READ16(IIR); /* Flush posted writes */ | |
3792 | ||
c2798b19 CW |
3793 | if (iir & I915_USER_INTERRUPT) |
3794 | notify_ring(dev, &dev_priv->ring[RCS]); | |
3795 | ||
055e393f | 3796 | for_each_pipe(dev_priv, pipe) { |
1f1c2e24 | 3797 | int plane = pipe; |
3a77c4c4 | 3798 | if (HAS_FBC(dev)) |
1f1c2e24 VS |
3799 | plane = !plane; |
3800 | ||
4356d586 | 3801 | if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && |
1f1c2e24 VS |
3802 | i8xx_handle_vblank(dev, plane, pipe, iir)) |
3803 | flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); | |
c2798b19 | 3804 | |
4356d586 | 3805 | if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) |
277de95e | 3806 | i9xx_pipe_crc_irq_handler(dev, pipe); |
2d9d2b0b | 3807 | |
1f7247c0 DV |
3808 | if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) |
3809 | intel_cpu_fifo_underrun_irq_handler(dev_priv, | |
3810 | pipe); | |
4356d586 | 3811 | } |
c2798b19 CW |
3812 | |
3813 | iir = new_iir; | |
3814 | } | |
3815 | ||
3816 | return IRQ_HANDLED; | |
3817 | } | |
3818 | ||
3819 | static void i8xx_irq_uninstall(struct drm_device * dev) | |
3820 | { | |
2d1013dd | 3821 | struct drm_i915_private *dev_priv = dev->dev_private; |
c2798b19 CW |
3822 | int pipe; |
3823 | ||
055e393f | 3824 | for_each_pipe(dev_priv, pipe) { |
c2798b19 CW |
3825 | /* Clear enable bits; then clear status bits */ |
3826 | I915_WRITE(PIPESTAT(pipe), 0); | |
3827 | I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); | |
3828 | } | |
3829 | I915_WRITE16(IMR, 0xffff); | |
3830 | I915_WRITE16(IER, 0x0); | |
3831 | I915_WRITE16(IIR, I915_READ16(IIR)); | |
3832 | } | |
3833 | ||
a266c7d5 CW |
3834 | static void i915_irq_preinstall(struct drm_device * dev) |
3835 | { | |
2d1013dd | 3836 | struct drm_i915_private *dev_priv = dev->dev_private; |
a266c7d5 CW |
3837 | int pipe; |
3838 | ||
a266c7d5 CW |
3839 | if (I915_HAS_HOTPLUG(dev)) { |
3840 | I915_WRITE(PORT_HOTPLUG_EN, 0); | |
3841 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); | |
3842 | } | |
3843 | ||
00d98ebd | 3844 | I915_WRITE16(HWSTAM, 0xeffe); |
055e393f | 3845 | for_each_pipe(dev_priv, pipe) |
a266c7d5 CW |
3846 | I915_WRITE(PIPESTAT(pipe), 0); |
3847 | I915_WRITE(IMR, 0xffffffff); | |
3848 | I915_WRITE(IER, 0x0); | |
3849 | POSTING_READ(IER); | |
3850 | } | |
3851 | ||
3852 | static int i915_irq_postinstall(struct drm_device *dev) | |
3853 | { | |
2d1013dd | 3854 | struct drm_i915_private *dev_priv = dev->dev_private; |
38bde180 | 3855 | u32 enable_mask; |
a266c7d5 | 3856 | |
38bde180 CW |
3857 | I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); |
3858 | ||
3859 | /* Unmask the interrupts that we always want on. */ | |
3860 | dev_priv->irq_mask = | |
3861 | ~(I915_ASLE_INTERRUPT | | |
3862 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | | |
3863 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | | |
3864 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | | |
3865 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | | |
3866 | I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); | |
3867 | ||
3868 | enable_mask = | |
3869 | I915_ASLE_INTERRUPT | | |
3870 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | | |
3871 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | | |
3872 | I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | | |
3873 | I915_USER_INTERRUPT; | |
3874 | ||
a266c7d5 | 3875 | if (I915_HAS_HOTPLUG(dev)) { |
20afbda2 DV |
3876 | I915_WRITE(PORT_HOTPLUG_EN, 0); |
3877 | POSTING_READ(PORT_HOTPLUG_EN); | |
3878 | ||
a266c7d5 CW |
3879 | /* Enable in IER... */ |
3880 | enable_mask |= I915_DISPLAY_PORT_INTERRUPT; | |
3881 | /* and unmask in IMR */ | |
3882 | dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; | |
3883 | } | |
3884 | ||
a266c7d5 CW |
3885 | I915_WRITE(IMR, dev_priv->irq_mask); |
3886 | I915_WRITE(IER, enable_mask); | |
3887 | POSTING_READ(IER); | |
3888 | ||
f49e38dd | 3889 | i915_enable_asle_pipestat(dev); |
20afbda2 | 3890 | |
379ef82d DV |
3891 | /* Interrupt setup is already guaranteed to be single-threaded, this is |
3892 | * just to make the assert_spin_locked check happy. */ | |
d6207435 | 3893 | spin_lock_irq(&dev_priv->irq_lock); |
755e9019 ID |
3894 | i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); |
3895 | i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); | |
d6207435 | 3896 | spin_unlock_irq(&dev_priv->irq_lock); |
379ef82d | 3897 | |
20afbda2 DV |
3898 | return 0; |
3899 | } | |
3900 | ||
90a72f87 VS |
3901 | /* |
3902 | * Returns true when a page flip has completed. | |
3903 | */ | |
3904 | static bool i915_handle_vblank(struct drm_device *dev, | |
3905 | int plane, int pipe, u32 iir) | |
3906 | { | |
2d1013dd | 3907 | struct drm_i915_private *dev_priv = dev->dev_private; |
90a72f87 VS |
3908 | u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); |
3909 | ||
8d7849db | 3910 | if (!intel_pipe_handle_vblank(dev, pipe)) |
90a72f87 VS |
3911 | return false; |
3912 | ||
3913 | if ((iir & flip_pending) == 0) | |
d6bbafa1 | 3914 | goto check_page_flip; |
90a72f87 VS |
3915 | |
3916 | intel_prepare_page_flip(dev, plane); | |
3917 | ||
3918 | /* We detect FlipDone by looking for the change in PendingFlip from '1' | |
3919 | * to '0' on the following vblank, i.e. IIR has the PendingFlip |
3920 | * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence | |
3921 | * the flip is completed (no longer pending). Since this doesn't raise | |
3922 | * an interrupt per se, we watch for the change at vblank. | |
3923 | */ | |
3924 | if (I915_READ(ISR) & flip_pending) | |
d6bbafa1 | 3925 | goto check_page_flip; |
90a72f87 VS |
3926 | |
3927 | intel_finish_page_flip(dev, pipe); | |
90a72f87 | 3928 | return true; |
d6bbafa1 CW |
3929 | |
3930 | check_page_flip: | |
3931 | intel_check_page_flip(dev, pipe); | |
3932 | return false; | |
90a72f87 VS |
3933 | } |
3934 | ||
ff1f525e | 3935 | static irqreturn_t i915_irq_handler(int irq, void *arg) |
a266c7d5 | 3936 | { |
45a83f84 | 3937 | struct drm_device *dev = arg; |
2d1013dd | 3938 | struct drm_i915_private *dev_priv = dev->dev_private; |
8291ee90 | 3939 | u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; |
38bde180 CW |
3940 | u32 flip_mask = |
3941 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | | |
3942 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; | |
38bde180 | 3943 | int pipe, ret = IRQ_NONE; |
a266c7d5 | 3944 | |
a266c7d5 | 3945 | iir = I915_READ(IIR); |
38bde180 CW |
3946 | do { |
3947 | bool irq_received = (iir & ~flip_mask) != 0; | |
8291ee90 | 3948 | bool blc_event = false; |
a266c7d5 CW |
3949 | |
3950 | /* Can't rely on pipestat interrupt bit in iir as it might | |
3951 | * have been cleared after the pipestat interrupt was received. | |
3952 | * It doesn't set the bit in iir again, but it still produces | |
3953 | * interrupts (for non-MSI). | |
3954 | */ | |
222c7f51 | 3955 | spin_lock(&dev_priv->irq_lock); |
a266c7d5 | 3956 | if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) |
aaecdf61 | 3957 | DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); |
a266c7d5 | 3958 | |
055e393f | 3959 | for_each_pipe(dev_priv, pipe) { |
a266c7d5 CW |
3960 | int reg = PIPESTAT(pipe); |
3961 | pipe_stats[pipe] = I915_READ(reg); | |
3962 | ||
38bde180 | 3963 | /* Clear the PIPE*STAT regs before the IIR */ |
a266c7d5 | 3964 | if (pipe_stats[pipe] & 0x8000ffff) { |
a266c7d5 | 3965 | I915_WRITE(reg, pipe_stats[pipe]); |
38bde180 | 3966 | irq_received = true; |
a266c7d5 CW |
3967 | } |
3968 | } | |
222c7f51 | 3969 | spin_unlock(&dev_priv->irq_lock); |
a266c7d5 CW |
3970 | |
3971 | if (!irq_received) | |
3972 | break; | |
3973 | ||
a266c7d5 | 3974 | /* Consume port. Then clear IIR or we'll miss events */ |
16c6c56b VS |
3975 | if (I915_HAS_HOTPLUG(dev) && |
3976 | iir & I915_DISPLAY_PORT_INTERRUPT) | |
3977 | i9xx_hpd_irq_handler(dev); | |
a266c7d5 | 3978 | |
38bde180 | 3979 | I915_WRITE(IIR, iir & ~flip_mask); |
a266c7d5 CW |
3980 | new_iir = I915_READ(IIR); /* Flush posted writes */ |
3981 | ||
a266c7d5 CW |
3982 | if (iir & I915_USER_INTERRUPT) |
3983 | notify_ring(dev, &dev_priv->ring[RCS]); | |
a266c7d5 | 3984 | |
055e393f | 3985 | for_each_pipe(dev_priv, pipe) { |
38bde180 | 3986 | int plane = pipe; |
3a77c4c4 | 3987 | if (HAS_FBC(dev)) |
38bde180 | 3988 | plane = !plane; |
90a72f87 | 3989 | |
8291ee90 | 3990 | if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && |
90a72f87 VS |
3991 | i915_handle_vblank(dev, plane, pipe, iir)) |
3992 | flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); | |
a266c7d5 CW |
3993 | |
3994 | if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) | |
3995 | blc_event = true; | |
4356d586 DV |
3996 | |
3997 | if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) | |
277de95e | 3998 | i9xx_pipe_crc_irq_handler(dev, pipe); |
2d9d2b0b | 3999 | |
1f7247c0 DV |
4000 | if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) |
4001 | intel_cpu_fifo_underrun_irq_handler(dev_priv, | |
4002 | pipe); | |
a266c7d5 CW |
4003 | } |
4004 | ||
a266c7d5 CW |
4005 | if (blc_event || (iir & I915_ASLE_INTERRUPT)) |
4006 | intel_opregion_asle_intr(dev); | |
4007 | ||
4008 | /* With MSI, interrupts are only generated when iir | |
4009 | * transitions from zero to nonzero. If another bit got | |
4010 | * set while we were handling the existing iir bits, then | |
4011 | * we would never get another interrupt. | |
4012 | * | |
4013 | * This is fine on non-MSI as well, as if we hit this path | |
4014 | * we avoid exiting the interrupt handler only to generate | |
4015 | * another one. | |
4016 | * | |
4017 | * Note that for MSI this could cause a stray interrupt report | |
4018 | * if an interrupt landed in the time between writing IIR and | |
4019 | * the posting read. This should be rare enough to never | |
4020 | * trigger the 99% of 100,000 interrupts test for disabling | |
4021 | * stray interrupts. | |
4022 | */ | |
38bde180 | 4023 | ret = IRQ_HANDLED; |
a266c7d5 | 4024 | iir = new_iir; |
38bde180 | 4025 | } while (iir & ~flip_mask); |
a266c7d5 CW |
4026 | |
4027 | return ret; | |
4028 | } | |
4029 | ||
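The IIR comment in the handler above is the crux of the loop shape: with MSI an interrupt only fires on IIR's zero-to-nonzero transition, so the handler must re-read IIR after acking and keep looping until it drains to zero, or a bit that latched mid-handler would never raise another edge. A compact sketch of that drain loop; read_iir() and ack_iir() are hypothetical stand-ins for the MMIO accessors:

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t fake_iir = 0x1;
	static int injected;

	static uint32_t read_iir(void) { return fake_iir; }

	static void ack_iir(uint32_t bits)
	{
		fake_iir &= ~bits;
		if (!injected) {	/* a source latches mid-handler */
			fake_iir |= 0x4;
			injected = 1;
		}
	}

	int main(void)
	{
		uint32_t iir = read_iir();

		/* Drain IIR completely: a bit latched between the ack and
		 * the re-read is picked up here instead of waiting for an
		 * edge that will never come under MSI. */
		while (iir) {
			ack_iir(iir);
			printf("handled iir 0x%08x\n", (unsigned)iir);
			iir = read_iir();
		}
		return 0;
	}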
4030 | static void i915_irq_uninstall(struct drm_device * dev) | |
4031 | { | |
2d1013dd | 4032 | struct drm_i915_private *dev_priv = dev->dev_private; |
a266c7d5 CW |
4033 | int pipe; |
4034 | ||
a266c7d5 CW |
4035 | if (I915_HAS_HOTPLUG(dev)) { |
4036 | I915_WRITE(PORT_HOTPLUG_EN, 0); | |
4037 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); | |
4038 | } | |
4039 | ||
00d98ebd | 4040 | I915_WRITE16(HWSTAM, 0xffff); |
055e393f | 4041 | for_each_pipe(dev_priv, pipe) { |
55b39755 | 4042 | /* Clear enable bits; then clear status bits */ |
a266c7d5 | 4043 | I915_WRITE(PIPESTAT(pipe), 0); |
55b39755 CW |
4044 | I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); |
4045 | } | |
a266c7d5 CW |
4046 | I915_WRITE(IMR, 0xffffffff); |
4047 | I915_WRITE(IER, 0x0); | |
4048 | ||
a266c7d5 CW |
4049 | I915_WRITE(IIR, I915_READ(IIR)); |
4050 | } | |
4051 | ||
4052 | static void i965_irq_preinstall(struct drm_device * dev) | |
4053 | { | |
2d1013dd | 4054 | struct drm_i915_private *dev_priv = dev->dev_private; |
a266c7d5 CW |
4055 | int pipe; |
4056 | ||
adca4730 CW |
4057 | I915_WRITE(PORT_HOTPLUG_EN, 0); |
4058 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); | |
a266c7d5 CW |
4059 | |
4060 | I915_WRITE(HWSTAM, 0xeffe); | |
055e393f | 4061 | for_each_pipe(dev_priv, pipe) |
a266c7d5 CW |
4062 | I915_WRITE(PIPESTAT(pipe), 0); |
4063 | I915_WRITE(IMR, 0xffffffff); | |
4064 | I915_WRITE(IER, 0x0); | |
4065 | POSTING_READ(IER); | |
4066 | } | |
4067 | ||
4068 | static int i965_irq_postinstall(struct drm_device *dev) | |
4069 | { | |
2d1013dd | 4070 | struct drm_i915_private *dev_priv = dev->dev_private; |
bbba0a97 | 4071 | u32 enable_mask; |
a266c7d5 CW |
4072 | u32 error_mask; |
4073 | ||
a266c7d5 | 4074 | /* Unmask the interrupts that we always want on. */ |
bbba0a97 | 4075 | dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | |
adca4730 | 4076 | I915_DISPLAY_PORT_INTERRUPT | |
bbba0a97 CW |
4077 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | |
4078 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | | |
4079 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | | |
4080 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | | |
4081 | I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); | |
4082 | ||
4083 | enable_mask = ~dev_priv->irq_mask; | |
21ad8330 VS |
4084 | enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | |
4085 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); | |
bbba0a97 CW |
4086 | enable_mask |= I915_USER_INTERRUPT; |
4087 | ||
4088 | if (IS_G4X(dev)) | |
4089 | enable_mask |= I915_BSD_USER_INTERRUPT; | |
a266c7d5 | 4090 | |
b79480ba DV |
4091 | /* Interrupt setup is already guaranteed to be single-threaded, this is |
4092 | * just to make the assert_spin_locked check happy. */ | |
d6207435 | 4093 | spin_lock_irq(&dev_priv->irq_lock); |
755e9019 ID |
4094 | i915_enable_pipestat(dev_priv, PIPE_A, PIPE_GMBUS_INTERRUPT_STATUS); |
4095 | i915_enable_pipestat(dev_priv, PIPE_A, PIPE_CRC_DONE_INTERRUPT_STATUS); | |
4096 | i915_enable_pipestat(dev_priv, PIPE_B, PIPE_CRC_DONE_INTERRUPT_STATUS); | |
d6207435 | 4097 | spin_unlock_irq(&dev_priv->irq_lock); |
a266c7d5 | 4098 | |
a266c7d5 CW |
4099 | /* |
4100 | * Enable some error detection, note the instruction error mask | |
4101 | * bit is reserved, so we leave it masked. | |
4102 | */ | |
4103 | if (IS_G4X(dev)) { | |
4104 | error_mask = ~(GM45_ERROR_PAGE_TABLE | | |
4105 | GM45_ERROR_MEM_PRIV | | |
4106 | GM45_ERROR_CP_PRIV | | |
4107 | I915_ERROR_MEMORY_REFRESH); | |
4108 | } else { | |
4109 | error_mask = ~(I915_ERROR_PAGE_TABLE | | |
4110 | I915_ERROR_MEMORY_REFRESH); | |
4111 | } | |
4112 | I915_WRITE(EMR, error_mask); | |
4113 | ||
4114 | I915_WRITE(IMR, dev_priv->irq_mask); | |
4115 | I915_WRITE(IER, enable_mask); | |
4116 | POSTING_READ(IER); | |
4117 | ||
20afbda2 DV |
4118 | I915_WRITE(PORT_HOTPLUG_EN, 0); |
4119 | POSTING_READ(PORT_HOTPLUG_EN); | |
4120 | ||
f49e38dd | 4121 | i915_enable_asle_pipestat(dev); |
20afbda2 DV |
4122 | |
4123 | return 0; | |
4124 | } | |
4125 | ||
bac56d5b | 4126 | static void i915_hpd_irq_setup(struct drm_device *dev) |
20afbda2 | 4127 | { |
2d1013dd | 4128 | struct drm_i915_private *dev_priv = dev->dev_private; |
cd569aed | 4129 | struct intel_encoder *intel_encoder; |
20afbda2 DV |
4130 | u32 hotplug_en; |
4131 | ||
b5ea2d56 DV |
4132 | assert_spin_locked(&dev_priv->irq_lock); |
4133 | ||
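/* Both callers (intel_hpd_init() and the hotplug re-enable work) hold
 * dev_priv->irq_lock across this function. */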
bac56d5b EE |
4134 | if (I915_HAS_HOTPLUG(dev)) { |
4135 | hotplug_en = I915_READ(PORT_HOTPLUG_EN); | |
4136 | hotplug_en &= ~HOTPLUG_INT_EN_MASK; | |
4137 | /* Note HDMI and DP share hotplug bits */ | |
e5868a31 | 4138 | /* enable bits are the same for all generations */ |
b2784e15 | 4139 | for_each_intel_encoder(dev, intel_encoder) |
cd569aed EE |
4140 | if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) |
4141 | hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin]; | |
bac56d5b EE |
4142 | /* Programming the CRT detection parameters tends to |
4143 |  * generate a spurious hotplug event about three | |
4144 |  * seconds later. So just do it once. | |
4145 |  */ | |
4146 | if (IS_G4X(dev)) | |
4147 | hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; | |
85fc95ba | 4148 | hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK; |
bac56d5b | 4149 | hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; |
a266c7d5 | 4150 | |
bac56d5b EE |
4151 | /* Ignore TV since it's buggy */ |
4152 | I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); | |
4153 | } | |
a266c7d5 CW |
4154 | } |
4155 | ||
ff1f525e | 4156 | static irqreturn_t i965_irq_handler(int irq, void *arg) |
a266c7d5 | 4157 | { |
45a83f84 | 4158 | struct drm_device *dev = arg; |
2d1013dd | 4159 | struct drm_i915_private *dev_priv = dev->dev_private; |
a266c7d5 CW |
4160 | u32 iir, new_iir; |
4161 | u32 pipe_stats[I915_MAX_PIPES]; | |
a266c7d5 | 4162 | int ret = IRQ_NONE, pipe; |
21ad8330 VS |
4163 | u32 flip_mask = |
4164 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | | |
4165 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; | |
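/* Flip-pending bits are deliberately left latched in IIR (neither acked
 * nor counted as "irq received") until i915_handle_vblank() retires the
 * flip; the per-plane mask bit is then dropped so the next pass acks it. */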
a266c7d5 | 4166 | |
a266c7d5 CW |
4167 | iir = I915_READ(IIR); |
4168 | ||
a266c7d5 | 4169 | for (;;) { |
501e01d7 | 4170 | bool irq_received = (iir & ~flip_mask) != 0; |
2c8ba29f CW |
4171 | bool blc_event = false; |
4172 | ||
a266c7d5 CW |
4173 | /* Can't rely on the pipestat interrupt bit in iir, as it might |
4174 | * have been cleared after the pipestat interrupt was received. | |
4175 | * It doesn't set the bit in iir again, but it still produces | |
4176 | * interrupts (for non-MSI). | |
4177 | */ | |
222c7f51 | 4178 | spin_lock(&dev_priv->irq_lock); |
a266c7d5 | 4179 | if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) |
aaecdf61 | 4180 | DRM_DEBUG("Command parser error, iir 0x%08x\n", iir); |
a266c7d5 | 4181 | |
055e393f | 4182 | for_each_pipe(dev_priv, pipe) { |
a266c7d5 CW |
4183 | int reg = PIPESTAT(pipe); |
4184 | pipe_stats[pipe] = I915_READ(reg); | |
4185 | ||
4186 | /* | |
4187 | * Clear the PIPE*STAT regs before the IIR | |
4188 | */ | |
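/* 0x8000ffff covers the sixteen low status bits plus the FIFO
 * underrun status bit, which lives up at bit 31. */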
4189 | if (pipe_stats[pipe] & 0x8000ffff) { | |
a266c7d5 | 4190 | I915_WRITE(reg, pipe_stats[pipe]); |
501e01d7 | 4191 | irq_received = true; |
a266c7d5 CW |
4192 | } |
4193 | } | |
222c7f51 | 4194 | spin_unlock(&dev_priv->irq_lock); |
a266c7d5 CW |
4195 | |
4196 | if (!irq_received) | |
4197 | break; | |
4198 | ||
4199 | ret = IRQ_HANDLED; | |
4200 | ||
4201 | /* Consume port. Then clear IIR or we'll miss events */ | |
16c6c56b VS |
4202 | if (iir & I915_DISPLAY_PORT_INTERRUPT) |
4203 | i9xx_hpd_irq_handler(dev); | |
a266c7d5 | 4204 | |
21ad8330 | 4205 | I915_WRITE(IIR, iir & ~flip_mask); |
a266c7d5 CW |
4206 | new_iir = I915_READ(IIR); /* Flush posted writes */ |
4207 | ||
a266c7d5 CW |
4208 | if (iir & I915_USER_INTERRUPT) |
4209 | notify_ring(dev, &dev_priv->ring[RCS]); | |
4210 | if (iir & I915_BSD_USER_INTERRUPT) | |
4211 | notify_ring(dev, &dev_priv->ring[VCS]); | |
4212 | ||
055e393f | 4213 | for_each_pipe(dev_priv, pipe) { |
2c8ba29f | 4214 | if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && |
90a72f87 VS |
4215 | i915_handle_vblank(dev, pipe, pipe, iir)) |
4216 | flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe); | |
a266c7d5 CW |
4217 | |
4218 | if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) | |
4219 | blc_event = true; | |
4356d586 DV |
4220 | |
4221 | if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS) | |
277de95e | 4222 | i9xx_pipe_crc_irq_handler(dev, pipe); |
a266c7d5 | 4223 | |
1f7247c0 DV |
4224 | if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) |
4225 | intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe); | |
2d9d2b0b | 4226 | } |
a266c7d5 CW |
4227 | |
4228 | if (blc_event || (iir & I915_ASLE_INTERRUPT)) | |
4229 | intel_opregion_asle_intr(dev); | |
4230 | ||
515ac2bb DV |
4231 | if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) |
4232 | gmbus_irq_handler(dev); | |
4233 | ||
a266c7d5 CW |
4234 | /* With MSI, interrupts are only generated when iir |
4235 | * transitions from zero to nonzero. If another bit got | |
4236 | * set while we were handling the existing iir bits, then | |
4237 | * we would never get another interrupt. | |
4238 | * | |
4239 | * This is fine on non-MSI as well, as if we hit this path | |
4240 | * we avoid exiting the interrupt handler only to generate | |
4241 | * another one. | |
4242 | * | |
4243 | * Note that for MSI this could cause a stray interrupt report | |
4244 | * if an interrupt landed in the time between writing IIR and | |
4245 | * the posting read. This should be rare enough to never | |
4246 | * trigger the 99% of 100,000 interrupts test for disabling | |
4247 | * stray interrupts. | |
4248 | */ | |
4249 | iir = new_iir; | |
4250 | } | |
4251 | ||
4252 | return ret; | |
4253 | } | |
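/*
 * Editor's sketch (not driver code): the handler above is an instance of
 * the canonical ack-then-reread loop for edge-triggered (MSI) interrupt
 * identity registers. Stripped of the pipestat and flip bookkeeping, and
 * assuming only that IIR is write-1-to-clear, the shape is:
 *
 *	u32 iir = I915_READ(IIR);
 *	while (iir) {
 *		u32 new_iir;
 *		I915_WRITE(IIR, iir);		// ack what we have seen
 *		new_iir = I915_READ(IIR);	// flush the posted write and
 *						// pick up bits set meanwhile
 *		handle_events(iir);		// hypothetical helper
 *		iir = new_iir;			// loop until IIR reads zero
 *	}
 */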
4254 | ||
4255 | static void i965_irq_uninstall(struct drm_device *dev) | |
4256 | { | |
2d1013dd | 4257 | struct drm_i915_private *dev_priv = dev->dev_private; |
a266c7d5 CW |
4258 | int pipe; |
4259 | ||
4260 | if (!dev_priv) | |
4261 | return; | |
4262 | ||
adca4730 CW |
4263 | I915_WRITE(PORT_HOTPLUG_EN, 0); |
4264 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); | |
a266c7d5 CW |
4265 | |
4266 | I915_WRITE(HWSTAM, 0xffffffff); | |
055e393f | 4267 | for_each_pipe(dev_priv, pipe) |
a266c7d5 CW |
4268 | I915_WRITE(PIPESTAT(pipe), 0); |
4269 | I915_WRITE(IMR, 0xffffffff); | |
4270 | I915_WRITE(IER, 0x0); | |
4271 | ||
055e393f | 4272 | for_each_pipe(dev_priv, pipe) |
a266c7d5 CW |
4273 | I915_WRITE(PIPESTAT(pipe), |
4274 | I915_READ(PIPESTAT(pipe)) & 0x8000ffff); | |
4275 | I915_WRITE(IIR, I915_READ(IIR)); | |
4276 | } | |
4277 | ||
4cb21832 | 4278 | static void intel_hpd_irq_reenable_work(struct work_struct *work) |
ac4c16c5 | 4279 | { |
6323751d ID |
4280 | struct drm_i915_private *dev_priv = |
4281 | container_of(work, typeof(*dev_priv), | |
4282 | hotplug_reenable_work.work); | |
ac4c16c5 EE |
4283 | struct drm_device *dev = dev_priv->dev; |
4284 | struct drm_mode_config *mode_config = &dev->mode_config; | |
ac4c16c5 EE |
4285 | int i; |
4286 | ||
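/* Runs from the delayed work that gets scheduled (elsewhere, by the
 * hotplug storm handling) once a pin has been marked HPD_DISABLED;
 * it flips such pins back to interrupt-driven detection. */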
6323751d ID |
4287 | intel_runtime_pm_get(dev_priv); |
4288 | ||
4cb21832 | 4289 | spin_lock_irq(&dev_priv->irq_lock); |
ac4c16c5 EE |
4290 | for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) { |
4291 | struct drm_connector *connector; | |
4292 | ||
4293 | if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED) | |
4294 | continue; | |
4295 | ||
4296 | dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED; | |
4297 | ||
4298 | list_for_each_entry(connector, &mode_config->connector_list, head) { | |
4299 | struct intel_connector *intel_connector = to_intel_connector(connector); | |
4300 | ||
4301 | if (intel_connector->encoder->hpd_pin == i) { | |
4302 | if (connector->polled != intel_connector->polled) | |
4303 | DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n", | |
c23cc417 | 4304 | connector->name); |
ac4c16c5 EE |
4305 | connector->polled = intel_connector->polled; |
4306 | if (!connector->polled) | |
4307 | connector->polled = DRM_CONNECTOR_POLL_HPD; | |
4308 | } | |
4309 | } | |
4310 | } | |
4311 | if (dev_priv->display.hpd_irq_setup) | |
4312 | dev_priv->display.hpd_irq_setup(dev); | |
4cb21832 | 4313 | spin_unlock_irq(&dev_priv->irq_lock); |
6323751d ID |
4314 | |
4315 | intel_runtime_pm_put(dev_priv); | |
ac4c16c5 EE |
4316 | } |
4317 | ||
fca52a55 DV |
4318 | /** |
4319 | * intel_irq_init - initializes irq support | |
4320 | * @dev_priv: i915 device instance | |
4321 | * | |
4322 | * This function initializes all the irq support including work items, timers | |
4323 | * and all the vtables. It does not set up the interrupt itself, though. |
4324 | */ | |
b963291c | 4325 | void intel_irq_init(struct drm_i915_private *dev_priv) |
f71d4af4 | 4326 | { |
b963291c | 4327 | struct drm_device *dev = dev_priv->dev; |
8b2e326d CW |
4328 | |
4329 | INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); | |
13cf5504 | 4330 | INIT_WORK(&dev_priv->dig_port_work, i915_digport_work_func); |
99584db3 | 4331 | INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func); |
c6a828d3 | 4332 | INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); |
a4da4fa4 | 4333 | INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); |
8b2e326d | 4334 | |
a6706b45 | 4335 | /* Let's track the enabled rps events */ |
b963291c | 4336 | if (IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) |
6c65a587 | 4337 | /* WaGsvRC0ResidencyMethod:vlv */ |
31685c25 D |
4338 | dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED; |
4339 | else | |
4340 | dev_priv->pm_rps_events = GEN6_PM_RPS_EVENTS; | |
a6706b45 | 4341 | |
99584db3 DV |
4342 | setup_timer(&dev_priv->gpu_error.hangcheck_timer, |
4343 | i915_hangcheck_elapsed, | |
61bac78e | 4344 | (unsigned long) dev); |
6323751d | 4345 | INIT_DELAYED_WORK(&dev_priv->hotplug_reenable_work, |
4cb21832 | 4346 | intel_hpd_irq_reenable_work); |
61bac78e | 4347 | |
97a19a24 | 4348 | pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); |
9ee32fea | 4349 | |
b963291c | 4350 | if (IS_GEN2(dev_priv)) { |
4cdb83ec VS |
4351 | dev->max_vblank_count = 0; |
4352 | dev->driver->get_vblank_counter = i8xx_get_vblank_counter; | |
b963291c | 4353 | } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) { |
f71d4af4 JB |
4354 | dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ |
4355 | dev->driver->get_vblank_counter = gm45_get_vblank_counter; | |
391f75e2 VS |
4356 | } else { |
4357 | dev->driver->get_vblank_counter = i915_get_vblank_counter; | |
4358 | dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ | |
f71d4af4 JB |
4359 | } |
4360 | ||
21da2700 VS |
4361 | /* |
4362 | * Opt out of the vblank disable timer on everything except gen2. | |
4363 | * Gen2 doesn't have a hardware frame counter and so depends on | |
4364 | * vblank interrupts to produce sane vblank sequence numbers. |
4365 | */ | |
b963291c | 4366 | if (!IS_GEN2(dev_priv)) |
21da2700 VS |
4367 | dev->vblank_disable_immediate = true; |
4368 | ||
c2baf4b7 | 4369 | if (drm_core_check_feature(dev, DRIVER_MODESET)) { |
c3613de9 | 4370 | dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; |
c2baf4b7 VS |
4371 | dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; |
4372 | } | |
f71d4af4 | 4373 | |
b963291c | 4374 | if (IS_CHERRYVIEW(dev_priv)) { |
43f328d7 VS |
4375 | dev->driver->irq_handler = cherryview_irq_handler; |
4376 | dev->driver->irq_preinstall = cherryview_irq_preinstall; | |
4377 | dev->driver->irq_postinstall = cherryview_irq_postinstall; | |
4378 | dev->driver->irq_uninstall = cherryview_irq_uninstall; | |
4379 | dev->driver->enable_vblank = valleyview_enable_vblank; | |
4380 | dev->driver->disable_vblank = valleyview_disable_vblank; | |
4381 | dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; | |
b963291c | 4382 | } else if (IS_VALLEYVIEW(dev_priv)) { |
7e231dbe JB |
4383 | dev->driver->irq_handler = valleyview_irq_handler; |
4384 | dev->driver->irq_preinstall = valleyview_irq_preinstall; | |
4385 | dev->driver->irq_postinstall = valleyview_irq_postinstall; | |
4386 | dev->driver->irq_uninstall = valleyview_irq_uninstall; | |
4387 | dev->driver->enable_vblank = valleyview_enable_vblank; | |
4388 | dev->driver->disable_vblank = valleyview_disable_vblank; | |
fa00abe0 | 4389 | dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; |
b963291c | 4390 | } else if (INTEL_INFO(dev_priv)->gen >= 8) { |
abd58f01 | 4391 | dev->driver->irq_handler = gen8_irq_handler; |
723761b8 | 4392 | dev->driver->irq_preinstall = gen8_irq_reset; |
abd58f01 BW |
4393 | dev->driver->irq_postinstall = gen8_irq_postinstall; |
4394 | dev->driver->irq_uninstall = gen8_irq_uninstall; | |
4395 | dev->driver->enable_vblank = gen8_enable_vblank; | |
4396 | dev->driver->disable_vblank = gen8_disable_vblank; | |
4397 | dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup; | |
f71d4af4 JB |
4398 | } else if (HAS_PCH_SPLIT(dev)) { |
4399 | dev->driver->irq_handler = ironlake_irq_handler; | |
723761b8 | 4400 | dev->driver->irq_preinstall = ironlake_irq_reset; |
f71d4af4 JB |
4401 | dev->driver->irq_postinstall = ironlake_irq_postinstall; |
4402 | dev->driver->irq_uninstall = ironlake_irq_uninstall; | |
4403 | dev->driver->enable_vblank = ironlake_enable_vblank; | |
4404 | dev->driver->disable_vblank = ironlake_disable_vblank; | |
82a28bcf | 4405 | dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup; |
f71d4af4 | 4406 | } else { |
b963291c | 4407 | if (INTEL_INFO(dev_priv)->gen == 2) { |
c2798b19 CW |
4408 | dev->driver->irq_preinstall = i8xx_irq_preinstall; |
4409 | dev->driver->irq_postinstall = i8xx_irq_postinstall; | |
4410 | dev->driver->irq_handler = i8xx_irq_handler; | |
4411 | dev->driver->irq_uninstall = i8xx_irq_uninstall; | |
b963291c | 4412 | } else if (INTEL_INFO(dev_priv)->gen == 3) { |
a266c7d5 CW |
4413 | dev->driver->irq_preinstall = i915_irq_preinstall; |
4414 | dev->driver->irq_postinstall = i915_irq_postinstall; | |
4415 | dev->driver->irq_uninstall = i915_irq_uninstall; | |
4416 | dev->driver->irq_handler = i915_irq_handler; | |
20afbda2 | 4417 | dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; |
c2798b19 | 4418 | } else { |
a266c7d5 CW |
4419 | dev->driver->irq_preinstall = i965_irq_preinstall; |
4420 | dev->driver->irq_postinstall = i965_irq_postinstall; | |
4421 | dev->driver->irq_uninstall = i965_irq_uninstall; | |
4422 | dev->driver->irq_handler = i965_irq_handler; | |
bac56d5b | 4423 | dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; |
c2798b19 | 4424 | } |
f71d4af4 JB |
4425 | dev->driver->enable_vblank = i915_enable_vblank; |
4426 | dev->driver->disable_vblank = i915_disable_vblank; | |
4427 | } | |
4428 | } | |
20afbda2 | 4429 | |
fca52a55 DV |
4430 | /** |
4431 | * intel_hpd_init - initializes and enables hpd support | |
4432 | * @dev_priv: i915 device instance | |
4433 | * | |
4434 | * This function enables the hotplug support. It requires that interrupts have | |
4435 | * already been enabled with intel_irq_install(). From this point on hotplug and |
4436 | * poll requests can run concurrently with other code, so locking rules must be |
4437 | * obeyed. | |
4438 | * | |
4439 | * This is a separate step from interrupt enabling to simplify the locking rules | |
4440 | * in the driver load and resume code. | |
4441 | */ | |
b963291c | 4442 | void intel_hpd_init(struct drm_i915_private *dev_priv) |
20afbda2 | 4443 | { |
b963291c | 4444 | struct drm_device *dev = dev_priv->dev; |
821450c6 EE |
4445 | struct drm_mode_config *mode_config = &dev->mode_config; |
4446 | struct drm_connector *connector; | |
4447 | int i; | |
20afbda2 | 4448 | |
821450c6 EE |
4449 | for (i = 1; i < HPD_NUM_PINS; i++) { |
4450 | dev_priv->hpd_stats[i].hpd_cnt = 0; | |
4451 | dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED; | |
4452 | } | |
4453 | list_for_each_entry(connector, &mode_config->connector_list, head) { | |
4454 | struct intel_connector *intel_connector = to_intel_connector(connector); | |
4455 | connector->polled = intel_connector->polled; | |
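/* Interrupt-driven detection is preferred whenever the pin supports it;
 * MST ports are always serviced via HPD. */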
0e32b39c DA |
4456 | if (connector->encoder && !connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE) |
4457 | connector->polled = DRM_CONNECTOR_POLL_HPD; | |
4458 | if (intel_connector->mst_port) | |
821450c6 EE |
4459 | connector->polled = DRM_CONNECTOR_POLL_HPD; |
4460 | } | |
b5ea2d56 DV |
4461 | |
4462 | /* Interrupt setup is already guaranteed to be single-threaded; this is |
4463 | * just to make the assert_spin_locked checks happy. */ | |
d6207435 | 4464 | spin_lock_irq(&dev_priv->irq_lock); |
20afbda2 DV |
4465 | if (dev_priv->display.hpd_irq_setup) |
4466 | dev_priv->display.hpd_irq_setup(dev); | |
d6207435 | 4467 | spin_unlock_irq(&dev_priv->irq_lock); |
20afbda2 | 4468 | } |
c67a470b | 4469 | |
fca52a55 DV |
4470 | /** |
4471 | * intel_irq_install - enables the hardware interrupt | |
4472 | * @dev_priv: i915 device instance | |
4473 | * | |
4474 | * This function enables the hardware interrupt handling, but leaves the hotplug | |
4475 | * handling still disabled. It is called after intel_irq_init(). | |
4476 | * | |
4477 | * In the driver load and resume code we need working interrupts in a few places | |
4478 | * but don't want to deal with the hassle of concurrent probe and hotplug | |
4479 | * workers. Hence the split into this two-stage approach. | |
4480 | */ | |
2aeb7d3a DV |
4481 | int intel_irq_install(struct drm_i915_private *dev_priv) |
4482 | { | |
4483 | /* | |
4484 | * We enable some interrupt sources in our postinstall hooks, so mark | |
4485 | * interrupts as enabled _before_ actually enabling them to avoid | |
4486 | * special cases in our ordering checks. | |
4487 | */ | |
4488 | dev_priv->pm.irqs_enabled = true; | |
4489 | ||
4490 | return drm_irq_install(dev_priv->dev, dev_priv->dev->pdev->irq); | |
4491 | } | |
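/*
 * Editor's sketch: the kerneldoc in this file implies the following
 * bring-up ordering at driver load. The call site and error unwinding
 * here are hypothetical illustrations, not the driver's load path.
 */
static int __maybe_unused example_irq_bringup(struct drm_i915_private *dev_priv)
{
	int ret;

	intel_irq_init(dev_priv);		/* work items, timers, vtables */

	ret = intel_irq_install(dev_priv);	/* hardware interrupt goes live */
	if (ret)
		return ret;

	intel_hpd_init(dev_priv);		/* hotplug last: needs working irqs */
	return 0;
}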
4492 | ||
fca52a55 DV |
4493 | /** |
4494 | * intel_irq_uninstall - finalizes all irq handling |
4495 | * @dev_priv: i915 device instance | |
4496 | * | |
4497 | * This stops interrupt and hotplug handling and unregisters and frees all | |
4498 | * resources acquired in the init functions. | |
4499 | */ | |
2aeb7d3a DV |
4500 | void intel_irq_uninstall(struct drm_i915_private *dev_priv) |
4501 | { | |
4502 | drm_irq_uninstall(dev_priv->dev); | |
4503 | intel_hpd_cancel_work(dev_priv); | |
4504 | dev_priv->pm.irqs_enabled = false; | |
4505 | } | |
4506 | ||
fca52a55 DV |
4507 | /** |
4508 | * intel_runtime_pm_disable_interrupts - runtime interrupt disabling | |
4509 | * @dev_priv: i915 device instance | |
4510 | * | |
4511 | * This function is used to disable interrupts at runtime, both in the runtime | |
4512 | * pm and the system suspend/resume code. | |
4513 | */ | |
b963291c | 4514 | void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv) |
c67a470b | 4515 | { |
b963291c | 4516 | dev_priv->dev->driver->irq_uninstall(dev_priv->dev); |
2aeb7d3a | 4517 | dev_priv->pm.irqs_enabled = false; |
c67a470b PZ |
4518 | } |
4519 | ||
fca52a55 DV |
4520 | /** |
4521 | * intel_runtime_pm_enable_interrupts - runtime interrupt enabling | |
4522 | * @dev_priv: i915 device instance | |
4523 | * | |
4524 | * This function is used to enable interrupts at runtime, both in the runtime | |
4525 | * pm and the system suspend/resume code. | |
4526 | */ | |
b963291c | 4527 | void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv) |
c67a470b | 4528 | { |
2aeb7d3a | 4529 | dev_priv->pm.irqs_enabled = true; |
b963291c DV |
4530 | dev_priv->dev->driver->irq_preinstall(dev_priv->dev); |
4531 | dev_priv->dev->driver->irq_postinstall(dev_priv->dev); | |
c67a470b | 4532 | } |