drm/i915: fix locking around ironlake_enable|disable_display_irq
drivers/gpu/drm/i915/i915_irq.c
/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_gen4[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static void ibx_hpd_irq_setup(struct drm_device *dev);
static void i915_hpd_irq_setup(struct drm_device *dev);

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}
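
/*
 * Editor's note (illustrative, not part of the original file): both helpers
 * above require the caller to already hold dev_priv->irq_lock, which is what
 * the assert_spin_locked() calls document. A typical call site therefore
 * looks roughly like this, with DE_PIPEA_VBLANK standing in for whatever
 * DEIMR bit the caller actually cares about:
 *
 *	spin_lock_irqsave(&dev_priv->irq_lock, flags);
 *	ironlake_enable_display_irq(dev_priv, DE_PIPEA_VBLANK);
 *	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
 */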

static bool ivb_can_enable_err_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	enum pipe pipe;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->cpu_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->pch_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
						 enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
					  DE_PIPEB_FIFO_UNDERRUN;

	if (enable)
		ironlake_enable_display_irq(dev_priv, bit);
	else
		ironlake_disable_display_irq(dev_priv, bit);
}

static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
						  bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		if (!ivb_can_enable_err_int(dev))
			return;

		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN_A |
					 ERR_INT_FIFO_UNDERRUN_B |
					 ERR_INT_FIFO_UNDERRUN_C);

		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
	} else {
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
	}
}

static void ibx_set_fifo_underrun_reporting(struct intel_crtc *crtc,
					    bool enable)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (crtc->pipe == PIPE_A) ? SDE_TRANSA_FIFO_UNDER :
						SDE_TRANSB_FIFO_UNDER;

	if (enable)
		I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~bit);
	else
		I915_WRITE(SDEIMR, I915_READ(SDEIMR) | bit);

	POSTING_READ(SDEIMR);
}

static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		if (!cpt_can_enable_serr_int(dev))
			return;

		I915_WRITE(SERR_INT, SERR_INT_TRANS_A_FIFO_UNDERRUN |
				     SERR_INT_TRANS_B_FIFO_UNDERRUN |
				     SERR_INT_TRANS_C_FIFO_UNDERRUN);

		I915_WRITE(SDEIMR, I915_READ(SDEIMR) & ~SDE_ERROR_CPT);
	} else {
		I915_WRITE(SDEIMR, I915_READ(SDEIMR) | SDE_ERROR_CPT);
	}

	POSTING_READ(SDEIMR);
}

/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts
 * for the other pipes, due to the fact that there's just one interrupt
 * mask/enable bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->cpu_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}
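
/*
 * Editor's note (illustrative, not part of the original file): callers can
 * use the returned previous state to restore reporting once a risky window
 * has passed, e.g.:
 *
 *	bool was_enabled;
 *
 *	was_enabled = intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false);
 *	... reconfigure the pipe ...
 *	intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, was_enabled);
 *
 * The real call sites live in the modeset code, not in this file.
 */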

/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's
 * just one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe p;
	struct drm_crtc *crtc;
	struct intel_crtc *intel_crtc;
	unsigned long flags;
	bool ret;

	if (HAS_PCH_LPT(dev)) {
		crtc = NULL;
		for_each_pipe(p) {
			struct drm_crtc *c = dev_priv->pipe_to_crtc_mapping[p];
			if (intel_pipe_has_type(c, INTEL_OUTPUT_ANALOG)) {
				crtc = c;
				break;
			}
		}
		if (!crtc) {
			DRM_ERROR("PCH FIFO underrun, but no CRTC using the PCH found\n");
			return false;
		}
	} else {
		crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	}
	intel_crtc = to_intel_crtc(crtc);

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->pch_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->pch_fifo_underrun_disabled = !enable;

	if (HAS_PCH_IBX(dev))
		ibx_set_fifo_underrun_reporting(intel_crtc, enable);
	else
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}


void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	if ((pipestat & mask) == mask)
		return;

	/* Enable the interrupt, clear any pending status */
	pipestat |= mask | (mask >> 16);
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	if ((pipestat & mask) == 0)
		return;

	pipestat &= ~mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	low >>= PIPE_FRAME_LOW_SHIFT;
	return (high1 << 8) | low;
}

static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    int *vpos, int *hpos)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	/* Get vtotal. */
	vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = I915_READ(PIPEDSL(pipe));

		/* Decode into vertical scanout position. Don't have
		 * horizontal scanout position.
		 */
		*vpos = position & 0x1fff;
		*hpos = 0;
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* Query vblank area. */
	vbl = I915_READ(VBLANK(cpu_transcoder));

	/* Test position against vblank region. */
	vbl_start = vbl & 0x1fff;
	vbl_end = (vbl >> 16) & 0x1fff;

	if ((*vpos < vbl_start) || (*vpos > vbl_end))
		in_vbl = false;

	/* Inside "upper part" of vblank area? Apply corrective offset: */
	if (in_vbl && (*vpos >= vbl_start))
		*vpos = *vpos - vtotal;

	/* Readouts valid? */
	if (vbl > 0)
		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}

static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc);
}

static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
		      connector->base.id,
		      drm_get_connector_name(connector),
		      old_status, connector->status);
	return (old_status != connector->status);
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	unsigned long irqflags;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	/* HPD irq before everything is fully set up. */
	if (!dev_priv->enable_hotplug_processing)
		return;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 drm_get_connector_name(connector));
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      drm_get_connector_name(connector), intel_encoder->hpd_pin);
		}
	}
	/* if there were no outputs to poll, poll was disabled,
	 * therefore make sure it's enabled when disabling HPD on
	 * some connectors */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_timer(&dev_priv->hotplug_reenable_timer,
			  jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}

static void ironlake_handle_rps_change(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;
	unsigned long flags;

	spin_lock_irqsave(&mchdev_lock, flags);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock_irqrestore(&mchdev_lock, flags);

	return;
}

static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (ring->obj == NULL)
		return;

	trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false));

	wake_up_all(&ring->irq_queue);
	if (i915_enable_hangcheck) {
		mod_timer(&dev_priv->gpu_error.hangcheck_timer,
			  round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
	}
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps.work);
	u32 pm_iir, pm_imr;
	u8 new_delay;

	spin_lock_irq(&dev_priv->rps.lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	pm_imr = I915_READ(GEN6_PMIMR);
	/* Make sure not to corrupt PMIMR state used by ringbuffer code */
	I915_WRITE(GEN6_PMIMR, pm_imr & ~GEN6_PM_RPS_EVENTS);
	spin_unlock_irq(&dev_priv->rps.lock);

	if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		new_delay = dev_priv->rps.cur_delay + 1;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (IS_VALLEYVIEW(dev_priv->dev) &&
		    dev_priv->rps.cur_delay < dev_priv->rps.rpe_delay)
			new_delay = dev_priv->rps.rpe_delay;
	} else
		new_delay = dev_priv->rps.cur_delay - 1;

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	if (new_delay >= dev_priv->rps.min_delay &&
	    new_delay <= dev_priv->rps.max_delay) {
		if (IS_VALLEYVIEW(dev_priv->dev))
			valleyview_set_rps(dev_priv->dev, new_delay);
		else
			gen6_set_rps(dev_priv->dev, new_delay);
	}

	if (IS_VALLEYVIEW(dev_priv->dev)) {
		/*
		 * On VLV, when we enter RC6 we may not be at the minimum
		 * voltage level, so arm a timer to check. It should only
		 * fire when there's activity or once after we've entered
		 * RC6, and then won't be re-armed until the next RPS interrupt.
		 */
		mod_delayed_work(dev_priv->wq, &dev_priv->rps.vlv_work,
				 msecs_to_jiffies(100));
	}

	mutex_unlock(&dev_priv->rps.hw_lock);
}


/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[5];
	uint32_t misccpctl;
	unsigned long flags;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	error_status = I915_READ(GEN7_L3CDERRST1);
	row = GEN7_PARITY_ERROR_ROW(error_status);
	bank = GEN7_PARITY_ERROR_BANK(error_status);
	subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

	I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID |
				    GEN7_L3CDERRST1_ENABLE);
	POSTING_READ(GEN7_L3CDERRST1);

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask &= ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);

	parity_event[0] = "L3_PARITY_ERROR=1";
	parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
	parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
	parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
	parity_event[4] = NULL;

	kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
			   KOBJ_CHANGE, parity_event);

	DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n",
		  row, bank, subbank);

	kfree(parity_event[3]);
	kfree(parity_event[2]);
	kfree(parity_event[1]);
}
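
/*
 * Editor's note (illustrative, not part of the original file): the uevent
 * emitted above carries an environment of the form
 *
 *	L3_PARITY_ERROR=1 ROW=<n> BANK=<n> SUBBANK=<n>
 *
 * so a userspace listener (for example a udev rule matching the change event
 * on the drm device) can record which L3 row failed and, if it chooses, ask
 * the driver to remap it through its l3_parity sysfs interface.
 */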

static void ivybridge_handle_parity_error(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long flags;

	if (!HAS_L3_GPU_CACHE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	dev_priv->gt_irq_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{

	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}

	if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		ivybridge_handle_parity_error(dev);
}

/* Legacy way of handling PM interrupts */
static void gen6_queue_rps_work(struct drm_i915_private *dev_priv,
				u32 pm_iir)
{
	unsigned long flags;

	/*
	 * IIR bits should never already be set because IMR should
	 * prevent an interrupt from being shown in IIR. The warning
	 * displays a case where we've unsafely cleared
	 * dev_priv->rps.pm_iir. Although missing an interrupt of the same
	 * type is not a problem, it displays a problem in the logic.
	 *
	 * The mask bit in IMR is cleared by dev_priv->rps.work.
	 */

	spin_lock_irqsave(&dev_priv->rps.lock, flags);
	dev_priv->rps.pm_iir |= pm_iir;
	I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
	POSTING_READ(GEN6_PMIMR);
	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);

	queue_work(dev_priv->wq, &dev_priv->rps.work);
}

#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5

static inline bool hotplug_irq_storm_detect(struct drm_device *dev,
					    u32 hotplug_trigger,
					    const u32 *hpd)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;
	int i;
	bool ret = false;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	for (i = 1; i < HPD_NUM_PINS; i++) {

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		dev_priv->hpd_event_bits |= (1 << i);
		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			ret = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
		}
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	return ret;
}
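
/*
 * Editor's note (illustrative, not part of the original file): with the
 * values above, a pin counts as "stormy" once more than HPD_STORM_THRESHOLD
 * interrupts arrive within a single HPD_STORM_DETECT_PERIOD (1000 ms) window;
 * a full quiet period resets the count. When this function returns true, the
 * interrupt handlers below re-run the platform hpd_irq_setup hook to mask the
 * offending pin, and the hotplug work switches that connector over to polling.
 */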

static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

/* Unlike gen6_queue_rps_work() from which this function is originally derived,
 * we must be able to deal with other PM interrupts. This is complicated because
 * of the way in which we use the masks to defer the RPS work (which for
 * posterity is necessary because of forcewake).
 */
static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv,
			       u32 pm_iir)
{
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->rps.lock, flags);
	dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
	if (dev_priv->rps.pm_iir) {
		I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir);
		/* never want to mask useful interrupts. (also posting read) */
		WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS);
		/* TODO: if queue_work is slow, move it out of the spinlock */
		queue_work(dev_priv->wq, &dev_priv->rps.work);
	}
	spin_unlock_irqrestore(&dev_priv->rps.lock, flags);

	if (pm_iir & ~GEN6_PM_RPS_EVENTS) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
			DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
			i915_handle_error(dev_priv->dev, false);
		}
	}
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	unsigned long irqflags;
	int pipe;
	u32 pipe_stats[I915_MAX_PIPES];

	atomic_inc(&dev_priv->irq_received);

	while (true) {
		iir = I915_READ(VLV_IIR);
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
				drm_handle_vblank(dev, pipe);

			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}
		}

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);
			if (hotplug_trigger) {
				if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915))
					i915_hpd_irq_setup(dev);
				queue_work(dev_priv->wq,
					   &dev_priv->hotplug_work);
			}
			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		if (pm_iir & GEN6_PM_RPS_EVENTS)
			gen6_queue_rps_work(dev_priv, pm_iir);

		I915_WRITE(GTIIR, gt_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		I915_WRITE(VLV_IIR, iir);
	}

out:
	return ret;
}

static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	if (hotplug_trigger) {
		if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_ibx))
			ibx_hpd_irq_setup(dev);
		queue_work(dev_priv->wq, &dev_priv->hotplug_work);
	}
	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
}
1100
1101static void ivb_err_int_handler(struct drm_device *dev)
1102{
1103 struct drm_i915_private *dev_priv = dev->dev_private;
1104 u32 err_int = I915_READ(GEN7_ERR_INT);
1105
de032bf4
PZ
1106 if (err_int & ERR_INT_POISON)
1107 DRM_ERROR("Poison interrupt\n");
1108
8664281b
PZ
1109 if (err_int & ERR_INT_FIFO_UNDERRUN_A)
1110 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
1111 DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");
1112
1113 if (err_int & ERR_INT_FIFO_UNDERRUN_B)
1114 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
1115 DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");
1116
1117 if (err_int & ERR_INT_FIFO_UNDERRUN_C)
1118 if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false))
1119 DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n");
1120
1121 I915_WRITE(GEN7_ERR_INT, err_int);
1122}

static void cpt_serr_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 serr_int = I915_READ(SERR_INT);

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");

	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");

	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n");

	I915_WRITE(SERR_INT, serr_int);
}

static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	if (hotplug_trigger) {
		if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_cpt))
			ibx_hpd_irq_setup(dev);
		queue_work(dev_priv->wq, &dev_priv->hotplug_work);
	}
	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev);
}

static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier = 0;
	irqreturn_t ret = IRQ_NONE;
	int i;

	atomic_inc(&dev_priv->irq_received);

	/* We get interrupts on unclaimed registers, so check for this before we
	 * do any I915_{READ,WRITE}. */
	if (IS_HASWELL(dev) &&
	    (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
		DRM_ERROR("Unclaimed register before interrupt\n");
		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}

	/* disable master interrupt before clearing iir */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	if (!HAS_PCH_NOP(dev)) {
		sde_ier = I915_READ(SDEIER);
		I915_WRITE(SDEIER, 0);
		POSTING_READ(SDEIER);
	}

	/* On Haswell, also mask ERR_INT because we don't want to risk
	 * generating "unclaimed register" interrupts from inside the interrupt
	 * handler. */
	if (IS_HASWELL(dev)) {
		spin_lock(&dev_priv->irq_lock);
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
		spin_unlock(&dev_priv->irq_lock);
	}

	gt_iir = I915_READ(GTIIR);
	if (gt_iir) {
		snb_gt_irq_handler(dev, dev_priv, gt_iir);
		I915_WRITE(GTIIR, gt_iir);
		ret = IRQ_HANDLED;
	}

	de_iir = I915_READ(DEIIR);
	if (de_iir) {
		if (de_iir & DE_ERR_INT_IVB)
			ivb_err_int_handler(dev);

		if (de_iir & DE_AUX_CHANNEL_A_IVB)
			dp_aux_irq_handler(dev);

		if (de_iir & DE_GSE_IVB)
			intel_opregion_asle_intr(dev);

		for (i = 0; i < 3; i++) {
			if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i)))
				drm_handle_vblank(dev, i);
			if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) {
				intel_prepare_page_flip(dev, i);
				intel_finish_page_flip_plane(dev, i);
			}
		}

		/* check event from PCH */
		if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) {
			u32 pch_iir = I915_READ(SDEIIR);

			cpt_irq_handler(dev, pch_iir);

			/* clear PCH hotplug event before clear CPU irq */
			I915_WRITE(SDEIIR, pch_iir);
		}

		I915_WRITE(DEIIR, de_iir);
		ret = IRQ_HANDLED;
	}

	pm_iir = I915_READ(GEN6_PMIIR);
	if (pm_iir) {
		if (IS_HASWELL(dev))
			hsw_pm_irq_handler(dev_priv, pm_iir);
		else if (pm_iir & GEN6_PM_RPS_EVENTS)
			gen6_queue_rps_work(dev_priv, pm_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		ret = IRQ_HANDLED;
	}

	if (IS_HASWELL(dev)) {
		spin_lock(&dev_priv->irq_lock);
		if (ivb_can_enable_err_int(dev))
			ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
		spin_unlock(&dev_priv->irq_lock);
	}

	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	if (!HAS_PCH_NOP(dev)) {
		I915_WRITE(SDEIER, sde_ier);
		POSTING_READ(SDEIER);
	}

	return ret;
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static irqreturn_t ironlake_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int ret = IRQ_NONE;
	u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;

	atomic_inc(&dev_priv->irq_received);

	/* disable master interrupt before clearing iir */
	de_ier = I915_READ(DEIER);
	I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
	POSTING_READ(DEIER);

	/* Disable south interrupts. We'll only write to SDEIIR once, so further
	 * interrupts will be stored on its back queue, and then we'll be
	 * able to process them after we restore SDEIER (as soon as we restore
	 * it, we'll get an interrupt if SDEIIR still has something to process
	 * due to its back queue). */
	sde_ier = I915_READ(SDEIER);
	I915_WRITE(SDEIER, 0);
	POSTING_READ(SDEIER);

	de_iir = I915_READ(DEIIR);
	gt_iir = I915_READ(GTIIR);
	pm_iir = I915_READ(GEN6_PMIIR);

	if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0))
		goto done;

	ret = IRQ_HANDLED;

	if (IS_GEN5(dev))
		ilk_gt_irq_handler(dev, dev_priv, gt_iir);
	else
		snb_gt_irq_handler(dev, dev_priv, gt_iir);

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev);

	if (de_iir & DE_PIPEA_VBLANK)
		drm_handle_vblank(dev, 0);

	if (de_iir & DE_PIPEB_VBLANK)
		drm_handle_vblank(dev, 1);

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");

	if (de_iir & DE_PIPEA_FIFO_UNDERRUN)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
			DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");

	if (de_iir & DE_PIPEB_FIFO_UNDERRUN)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
			DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");

	if (de_iir & DE_PLANEA_FLIP_DONE) {
		intel_prepare_page_flip(dev, 0);
		intel_finish_page_flip_plane(dev, 0);
	}

	if (de_iir & DE_PLANEB_FLIP_DONE) {
		intel_prepare_page_flip(dev, 1);
		intel_finish_page_flip_plane(dev, 1);
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
		ironlake_handle_rps_change(dev);

	if (IS_GEN6(dev) && pm_iir & GEN6_PM_RPS_EVENTS)
		gen6_queue_rps_work(dev_priv, pm_iir);

	I915_WRITE(GTIIR, gt_iir);
	I915_WRITE(DEIIR, de_iir);
	I915_WRITE(GEN6_PMIIR, pm_iir);

done:
	I915_WRITE(DEIER, de_ier);
	POSTING_READ(DEIER);
	I915_WRITE(SDEIER, sde_ier);
	POSTING_READ(SDEIER);

	return ret;
}
1412
8a905236
JB
1413/**
1414 * i915_error_work_func - do process context error handling work
1415 * @work: work struct
1416 *
1417 * Fire an error uevent so userspace can see that a hang or error
1418 * was detected.
1419 */
1420static void i915_error_work_func(struct work_struct *work)
1421{
1f83fee0
DV
1422 struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
1423 work);
1424 drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
1425 gpu_error);
8a905236 1426 struct drm_device *dev = dev_priv->dev;
f69061be 1427 struct intel_ring_buffer *ring;
f316a42c
BG
1428 char *error_event[] = { "ERROR=1", NULL };
1429 char *reset_event[] = { "RESET=1", NULL };
1430 char *reset_done_event[] = { "ERROR=0", NULL };
f69061be 1431 int i, ret;
8a905236 1432
f316a42c
BG
1433 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
1434
7db0ba24
DV
1435 /*
1436 * Note that there's only one work item which does gpu resets, so we
1437 * need not worry about concurrent gpu resets potentially incrementing
1438 * error->reset_counter twice. We only need to take care of another
1439 * racing irq/hangcheck declaring the gpu dead for a second time. A
1440 * quick check for that is good enough: schedule_work ensures the
1441 * correct ordering between hang detection and this work item, and since
1442 * the reset in-progress bit is only ever set by code outside of this
1443 * work we don't need to worry about any other races.
1444 */
1445 if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
f803aa55 1446 DRM_DEBUG_DRIVER("resetting chip\n");
7db0ba24
DV
1447 kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
1448 reset_event);
1f83fee0 1449
f69061be
DV
1450 ret = i915_reset(dev);
1451
1452 if (ret == 0) {
1453 /*
1454 * After all the gem state is reset, increment the reset
1455 * counter and wake up everyone waiting for the reset to
1456 * complete.
1457 *
1458 * Since unlock operations are a one-sided barrier only,
1459 * we need to insert a barrier here to order any seqno
1460 * updates before
1461 * the counter increment.
1462 */
1463 smp_mb__before_atomic_inc();
1464 atomic_inc(&dev_priv->gpu_error.reset_counter);
1465
1466 kobject_uevent_env(&dev->primary->kdev.kobj,
1467 KOBJ_CHANGE, reset_done_event);
1f83fee0
DV
1468 } else {
1469 atomic_set(&error->reset_counter, I915_WEDGED);
f316a42c 1470 }
1f83fee0 1471
f69061be
DV
1472 for_each_ring(ring, dev_priv, i)
1473 wake_up_all(&ring->irq_queue);
1474
96a02917
VS
1475 intel_display_handle_reset(dev);
1476
1f83fee0 1477 wake_up_all(&dev_priv->gpu_error.reset_queue);
f316a42c 1478 }
8a905236
JB
1479}
1480
85f9e50d
DV
1481/* NB: please notice the memset */
1482static void i915_get_extra_instdone(struct drm_device *dev,
1483 uint32_t *instdone)
1484{
1485 struct drm_i915_private *dev_priv = dev->dev_private;
1486 memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG);
1487
1488 switch(INTEL_INFO(dev)->gen) {
1489 case 2:
1490 case 3:
1491 instdone[0] = I915_READ(INSTDONE);
1492 break;
1493 case 4:
1494 case 5:
1495 case 6:
1496 instdone[0] = I915_READ(INSTDONE_I965);
1497 instdone[1] = I915_READ(INSTDONE1);
1498 break;
1499 default:
1500 WARN_ONCE(1, "Unsupported platform\n");
1501 case 7:
1502 instdone[0] = I915_READ(GEN7_INSTDONE_1);
1503 instdone[1] = I915_READ(GEN7_SC_INSTDONE);
1504 instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE);
1505 instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
1506 break;
1507 }
1508}
1509
3bd3c932 1510#ifdef CONFIG_DEBUG_FS
9df30794 1511static struct drm_i915_error_object *
d0d045e8
BW
1512i915_error_object_create_sized(struct drm_i915_private *dev_priv,
1513 struct drm_i915_gem_object *src,
1514 const int num_pages)
9df30794
CW
1515{
1516 struct drm_i915_error_object *dst;
d0d045e8 1517 int i;
e56660dd 1518 u32 reloc_offset;
9df30794 1519
05394f39 1520 if (src == NULL || src->pages == NULL)
9df30794
CW
1521 return NULL;
1522
d0d045e8 1523 dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC);
9df30794
CW
1524 if (dst == NULL)
1525 return NULL;
1526
05394f39 1527 reloc_offset = src->gtt_offset;
d0d045e8 1528 for (i = 0; i < num_pages; i++) {
788885ae 1529 unsigned long flags;
e56660dd 1530 void *d;
788885ae 1531
e56660dd 1532 d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
9df30794
CW
1533 if (d == NULL)
1534 goto unwind;
e56660dd 1535
788885ae 1536 local_irq_save(flags);
5d4545ae 1537 if (reloc_offset < dev_priv->gtt.mappable_end &&
74898d7e 1538 src->has_global_gtt_mapping) {
172975aa
CW
1539 void __iomem *s;
1540
1541 /* Simply ignore tiling or any overlapping fence.
1542 * It's part of the error state, and this hopefully
1543 * captures what the GPU read.
1544 */
1545
5d4545ae 1546 s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
172975aa
CW
1547 reloc_offset);
1548 memcpy_fromio(d, s, PAGE_SIZE);
1549 io_mapping_unmap_atomic(s);
960e3564
CW
1550 } else if (src->stolen) {
1551 unsigned long offset;
1552
1553 offset = dev_priv->mm.stolen_base;
1554 offset += src->stolen->start;
1555 offset += i << PAGE_SHIFT;
1556
1a240d4d 1557 memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE);
172975aa 1558 } else {
9da3da66 1559 struct page *page;
172975aa
CW
1560 void *s;
1561
9da3da66 1562 page = i915_gem_object_get_page(src, i);
172975aa 1563
9da3da66
CW
1564 drm_clflush_pages(&page, 1);
1565
1566 s = kmap_atomic(page);
172975aa
CW
1567 memcpy(d, s, PAGE_SIZE);
1568 kunmap_atomic(s);
1569
9da3da66 1570 drm_clflush_pages(&page, 1);
172975aa 1571 }
788885ae 1572 local_irq_restore(flags);
e56660dd 1573
9da3da66 1574 dst->pages[i] = d;
e56660dd
CW
1575
1576 reloc_offset += PAGE_SIZE;
9df30794 1577 }
d0d045e8 1578 dst->page_count = num_pages;
05394f39 1579 dst->gtt_offset = src->gtt_offset;
9df30794
CW
1580
1581 return dst;
1582
1583unwind:
9da3da66
CW
1584 while (i--)
1585 kfree(dst->pages[i]);
9df30794
CW
1586 kfree(dst);
1587 return NULL;
1588}
d0d045e8
BW
1589#define i915_error_object_create(dev_priv, src) \
1590 i915_error_object_create_sized((dev_priv), (src), \
1591 (src)->base.size>>PAGE_SHIFT)
9df30794
CW
1592
1593static void
1594i915_error_object_free(struct drm_i915_error_object *obj)
1595{
1596 int page;
1597
1598 if (obj == NULL)
1599 return;
1600
1601 for (page = 0; page < obj->page_count; page++)
1602 kfree(obj->pages[page]);
1603
1604 kfree(obj);
1605}
1606
742cbee8
DV
1607void
1608i915_error_state_free(struct kref *error_ref)
9df30794 1609{
742cbee8
DV
1610 struct drm_i915_error_state *error = container_of(error_ref,
1611 typeof(*error), ref);
e2f973d5
CW
1612 int i;
1613
52d39a21
CW
1614 for (i = 0; i < ARRAY_SIZE(error->ring); i++) {
1615 i915_error_object_free(error->ring[i].batchbuffer);
1616 i915_error_object_free(error->ring[i].ringbuffer);
7ed73da0 1617 i915_error_object_free(error->ring[i].ctx);
52d39a21
CW
1618 kfree(error->ring[i].requests);
1619 }
e2f973d5 1620
9df30794 1621 kfree(error->active_bo);
6ef3d427 1622 kfree(error->overlay);
7ed73da0 1623 kfree(error->display);
9df30794
CW
1624 kfree(error);
1625}
1b50247a
CW
1626static void capture_bo(struct drm_i915_error_buffer *err,
1627 struct drm_i915_gem_object *obj)
1628{
1629 err->size = obj->base.size;
1630 err->name = obj->base.name;
0201f1ec
CW
1631 err->rseqno = obj->last_read_seqno;
1632 err->wseqno = obj->last_write_seqno;
1b50247a
CW
1633 err->gtt_offset = obj->gtt_offset;
1634 err->read_domains = obj->base.read_domains;
1635 err->write_domain = obj->base.write_domain;
1636 err->fence_reg = obj->fence_reg;
1637 err->pinned = 0;
1638 if (obj->pin_count > 0)
1639 err->pinned = 1;
1640 if (obj->user_pin_count > 0)
1641 err->pinned = -1;
1642 err->tiling = obj->tiling_mode;
1643 err->dirty = obj->dirty;
1644 err->purgeable = obj->madv != I915_MADV_WILLNEED;
1645 err->ring = obj->ring ? obj->ring->id : -1;
1646 err->cache_level = obj->cache_level;
1647}
9df30794 1648
1b50247a
CW
1649static u32 capture_active_bo(struct drm_i915_error_buffer *err,
1650 int count, struct list_head *head)
c724e8a9
CW
1651{
1652 struct drm_i915_gem_object *obj;
1653 int i = 0;
1654
1655 list_for_each_entry(obj, head, mm_list) {
1b50247a 1656 capture_bo(err++, obj);
c724e8a9
CW
1657 if (++i == count)
1658 break;
1b50247a
CW
1659 }
1660
1661 return i;
1662}
1663
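/* As capture_active_bo(), but only record objects that are pinned. */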
1664static u32 capture_pinned_bo(struct drm_i915_error_buffer *err,
1665 int count, struct list_head *head)
1666{
1667 struct drm_i915_gem_object *obj;
1668 int i = 0;
1669
35c20a60 1670 list_for_each_entry(obj, head, global_list) {
1b50247a
CW
1671 if (obj->pin_count == 0)
1672 continue;
c724e8a9 1673
1b50247a
CW
1674 capture_bo(err++, obj);
1675 if (++i == count)
1676 break;
c724e8a9
CW
1677 }
1678
1679 return i;
1680}
1681
748ebc60
CW
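/* Save the contents of every fence register for the generation at hand. */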
1682static void i915_gem_record_fences(struct drm_device *dev,
1683 struct drm_i915_error_state *error)
1684{
1685 struct drm_i915_private *dev_priv = dev->dev_private;
1686 int i;
1687
1688 /* Fences */
1689 switch (INTEL_INFO(dev)->gen) {
775d17b6 1690 case 7:
748ebc60 1691 case 6:
42b5aeab 1692 for (i = 0; i < dev_priv->num_fence_regs; i++)
748ebc60
CW
1693 error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
1694 break;
1695 case 5:
1696 case 4:
1697 for (i = 0; i < 16; i++)
1698 error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
1699 break;
1700 case 3:
1701 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
1702 for (i = 0; i < 8; i++)
1703 error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
1704 case 2:
1705 for (i = 0; i < 8; i++)
1706 error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
1707 break;
1708
7dbf9d6e
BW
1709 default:
1710 BUG();
748ebc60
CW
1711 }
1712}
1713
bcfb2e28
CW
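/* Find and snapshot the batch buffer the GPU was most likely executing
 * when the hang occurred, based on ACTHD and the active list. */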
1714static struct drm_i915_error_object *
1715i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
1716 struct intel_ring_buffer *ring)
1717{
1718 struct drm_i915_gem_object *obj;
1719 u32 seqno;
1720
1721 if (!ring->get_seqno)
1722 return NULL;
1723
b45305fc
DV
1724 if (HAS_BROKEN_CS_TLB(dev_priv->dev)) {
1725 u32 acthd = I915_READ(ACTHD);
1726
1727 if (WARN_ON(ring->id != RCS))
1728 return NULL;
1729
1730 obj = ring->private;
1731 if (acthd >= obj->gtt_offset &&
1732 acthd < obj->gtt_offset + obj->base.size)
1733 return i915_error_object_create(dev_priv, obj);
1734 }
1735
b2eadbc8 1736 seqno = ring->get_seqno(ring, false);
bcfb2e28
CW
1737 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
1738 if (obj->ring != ring)
1739 continue;
1740
0201f1ec 1741 if (i915_seqno_passed(seqno, obj->last_read_seqno))
bcfb2e28
CW
1742 continue;
1743
1744 if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0)
1745 continue;
1746
1747 /* We need to copy these to an anonymous buffer as the simplest
1748 * method to avoid being overwritten by userspace.
1749 */
1750 return i915_error_object_create(dev_priv, obj);
1751 }
1752
1753 return NULL;
1754}
1755
d27b1e0e
DV
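/* Snapshot the per-ring register state (fault, semaphore mboxes,
 * IPEIR/IPEHR, INSTDONE, ACTHD, head/tail/ctl, ...) into the error record. */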
1756static void i915_record_ring_state(struct drm_device *dev,
1757 struct drm_i915_error_state *error,
1758 struct intel_ring_buffer *ring)
1759{
1760 struct drm_i915_private *dev_priv = dev->dev_private;
1761
33f3f518 1762 if (INTEL_INFO(dev)->gen >= 6) {
12f55818 1763 error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50);
33f3f518 1764 error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring));
7e3b8737
DV
1765 error->semaphore_mboxes[ring->id][0]
1766 = I915_READ(RING_SYNC_0(ring->mmio_base));
1767 error->semaphore_mboxes[ring->id][1]
1768 = I915_READ(RING_SYNC_1(ring->mmio_base));
df2b23d9
CW
1769 error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0];
1770 error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1];
33f3f518 1771 }
c1cd90ed 1772
d27b1e0e 1773 if (INTEL_INFO(dev)->gen >= 4) {
9d2f41fa 1774 error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base));
d27b1e0e
DV
1775 error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base));
1776 error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base));
1777 error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base));
c1cd90ed 1778 error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base));
050ee91f 1779 if (ring->id == RCS)
d27b1e0e 1780 error->bbaddr = I915_READ64(BB_ADDR);
d27b1e0e 1781 } else {
9d2f41fa 1782 error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX);
d27b1e0e
DV
1783 error->ipeir[ring->id] = I915_READ(IPEIR);
1784 error->ipehr[ring->id] = I915_READ(IPEHR);
1785 error->instdone[ring->id] = I915_READ(INSTDONE);
d27b1e0e
DV
1786 }
1787
9574b3fe 1788 error->waiting[ring->id] = waitqueue_active(&ring->irq_queue);
c1cd90ed 1789 error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base));
b2eadbc8 1790 error->seqno[ring->id] = ring->get_seqno(ring, false);
d27b1e0e 1791 error->acthd[ring->id] = intel_ring_get_active_head(ring);
c1cd90ed
DV
1792 error->head[ring->id] = I915_READ_HEAD(ring);
1793 error->tail[ring->id] = I915_READ_TAIL(ring);
0f3b6849 1794 error->ctl[ring->id] = I915_READ_CTL(ring);
7e3b8737
DV
1795
1796 error->cpu_ring_head[ring->id] = ring->head;
1797 error->cpu_ring_tail[ring->id] = ring->tail;
d27b1e0e
DV
1798}
1799
8c123e54
BW
1800
1801static void i915_gem_record_active_context(struct intel_ring_buffer *ring,
1802 struct drm_i915_error_state *error,
1803 struct drm_i915_error_ring *ering)
1804{
1805 struct drm_i915_private *dev_priv = ring->dev->dev_private;
1806 struct drm_i915_gem_object *obj;
1807
1808 /* Currently render ring is the only HW context user */
1809 if (ring->id != RCS || !error->ccid)
1810 return;
1811
35c20a60 1812 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
8c123e54
BW
1813 if ((error->ccid & PAGE_MASK) == obj->gtt_offset) {
1814 ering->ctx = i915_error_object_create_sized(dev_priv,
1815 obj, 1);
1816 }
1817 }
1818}
1819
52d39a21
CW
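/* For each ring, capture its register state, the suspect batchbuffer,
 * the ringbuffer contents, the active context and the pending requests. */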
1820static void i915_gem_record_rings(struct drm_device *dev,
1821 struct drm_i915_error_state *error)
1822{
1823 struct drm_i915_private *dev_priv = dev->dev_private;
b4519513 1824 struct intel_ring_buffer *ring;
52d39a21
CW
1825 struct drm_i915_gem_request *request;
1826 int i, count;
1827
b4519513 1828 for_each_ring(ring, dev_priv, i) {
52d39a21
CW
1829 i915_record_ring_state(dev, error, ring);
1830
1831 error->ring[i].batchbuffer =
1832 i915_error_first_batchbuffer(dev_priv, ring);
1833
1834 error->ring[i].ringbuffer =
1835 i915_error_object_create(dev_priv, ring->obj);
1836
8c123e54
BW
1837
1838 i915_gem_record_active_context(ring, error, &error->ring[i]);
1839
52d39a21
CW
1840 count = 0;
1841 list_for_each_entry(request, &ring->request_list, list)
1842 count++;
1843
1844 error->ring[i].num_requests = count;
1845 error->ring[i].requests =
1846 kmalloc(count*sizeof(struct drm_i915_error_request),
1847 GFP_ATOMIC);
1848 if (error->ring[i].requests == NULL) {
1849 error->ring[i].num_requests = 0;
1850 continue;
1851 }
1852
1853 count = 0;
1854 list_for_each_entry(request, &ring->request_list, list) {
1855 struct drm_i915_error_request *erq;
1856
1857 erq = &error->ring[i].requests[count++];
1858 erq->seqno = request->seqno;
1859 erq->jiffies = request->emitted_jiffies;
ee4f42b1 1860 erq->tail = request->tail;
52d39a21
CW
1861 }
1862 }
1863}
1864
8a905236
JB
1865/**
1866 * i915_capture_error_state - capture an error record for later analysis
1867 * @dev: drm device
1868 *
1869 * Should be called when an error is detected (either a hang or an error
1870 * interrupt) to capture error state from the time of the error. Fills
1871 * out a structure which becomes available in debugfs for user level tools
1872 * to pick up.
1873 */
63eeaf38
JB
1874static void i915_capture_error_state(struct drm_device *dev)
1875{
1876 struct drm_i915_private *dev_priv = dev->dev_private;
05394f39 1877 struct drm_i915_gem_object *obj;
63eeaf38
JB
1878 struct drm_i915_error_state *error;
1879 unsigned long flags;
9db4a9c7 1880 int i, pipe;
63eeaf38 1881
99584db3
DV
1882 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
1883 error = dev_priv->gpu_error.first_error;
1884 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
9df30794
CW
1885 if (error)
1886 return;
63eeaf38 1887
9db4a9c7 1888 /* Account for pipe specific data like PIPE*STAT */
33f3f518 1889 error = kzalloc(sizeof(*error), GFP_ATOMIC);
63eeaf38 1890 if (!error) {
9df30794
CW
1891 DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
1892 return;
63eeaf38
JB
1893 }
1894
5d83d294 1895 DRM_INFO("capturing error event; look for more information in "
2f86f191 1896 "/sys/kernel/debug/dri/%d/i915_error_state\n",
b6f7833b 1897 dev->primary->index);
2fa772f3 1898
742cbee8 1899 kref_init(&error->ref);
63eeaf38
JB
1900 error->eir = I915_READ(EIR);
1901 error->pgtbl_er = I915_READ(PGTBL_ER);
211816ec
BW
1902 if (HAS_HW_CONTEXTS(dev))
1903 error->ccid = I915_READ(CCID);
be998e2e
BW
1904
1905 if (HAS_PCH_SPLIT(dev))
1906 error->ier = I915_READ(DEIER) | I915_READ(GTIER);
1907 else if (IS_VALLEYVIEW(dev))
1908 error->ier = I915_READ(GTIER) | I915_READ(VLV_IER);
1909 else if (IS_GEN2(dev))
1910 error->ier = I915_READ16(IER);
1911 else
1912 error->ier = I915_READ(IER);
1913
0f3b6849
CW
1914 if (INTEL_INFO(dev)->gen >= 6)
1915 error->derrmr = I915_READ(DERRMR);
1916
1917 if (IS_VALLEYVIEW(dev))
1918 error->forcewake = I915_READ(FORCEWAKE_VLV);
1919 else if (INTEL_INFO(dev)->gen >= 7)
1920 error->forcewake = I915_READ(FORCEWAKE_MT);
1921 else if (INTEL_INFO(dev)->gen == 6)
1922 error->forcewake = I915_READ(FORCEWAKE);
1923
4f3308b9
PZ
1924 if (!HAS_PCH_SPLIT(dev))
1925 for_each_pipe(pipe)
1926 error->pipestat[pipe] = I915_READ(PIPESTAT(pipe));
d27b1e0e 1927
33f3f518 1928 if (INTEL_INFO(dev)->gen >= 6) {
f406839f 1929 error->error = I915_READ(ERROR_GEN6);
33f3f518
DV
1930 error->done_reg = I915_READ(DONE_REG);
1931 }
d27b1e0e 1932
71e172e8
BW
1933 if (INTEL_INFO(dev)->gen == 7)
1934 error->err_int = I915_READ(GEN7_ERR_INT);
1935
050ee91f
BW
1936 i915_get_extra_instdone(dev, error->extra_instdone);
1937
748ebc60 1938 i915_gem_record_fences(dev, error);
52d39a21 1939 i915_gem_record_rings(dev, error);
9df30794 1940
c724e8a9 1941 /* Record buffers on the active and pinned lists. */
9df30794 1942 error->active_bo = NULL;
c724e8a9 1943 error->pinned_bo = NULL;
9df30794 1944
bcfb2e28
CW
1945 i = 0;
1946 list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
1947 i++;
1948 error->active_bo_count = i;
35c20a60 1949 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
1b50247a
CW
1950 if (obj->pin_count)
1951 i++;
bcfb2e28 1952 error->pinned_bo_count = i - error->active_bo_count;
c724e8a9 1953
8e934dbf
CW
1954 error->active_bo = NULL;
1955 error->pinned_bo = NULL;
bcfb2e28
CW
1956 if (i) {
1957 error->active_bo = kmalloc(sizeof(*error->active_bo)*i,
9df30794 1958 GFP_ATOMIC);
c724e8a9
CW
1959 if (error->active_bo)
1960 error->pinned_bo =
1961 error->active_bo + error->active_bo_count;
9df30794
CW
1962 }
1963
c724e8a9
CW
1964 if (error->active_bo)
1965 error->active_bo_count =
1b50247a
CW
1966 capture_active_bo(error->active_bo,
1967 error->active_bo_count,
1968 &dev_priv->mm.active_list);
c724e8a9
CW
1969
1970 if (error->pinned_bo)
1971 error->pinned_bo_count =
1b50247a
CW
1972 capture_pinned_bo(error->pinned_bo,
1973 error->pinned_bo_count,
6c085a72 1974 &dev_priv->mm.bound_list);
c724e8a9 1975
9df30794
CW
1976 do_gettimeofday(&error->time);
1977
6ef3d427 1978 error->overlay = intel_overlay_capture_error_state(dev);
c4a1d9e4 1979 error->display = intel_display_capture_error_state(dev);
6ef3d427 1980
99584db3
DV
1981 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
1982 if (dev_priv->gpu_error.first_error == NULL) {
1983 dev_priv->gpu_error.first_error = error;
9df30794
CW
1984 error = NULL;
1985 }
99584db3 1986 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
9df30794
CW
1987
1988 if (error)
742cbee8 1989 i915_error_state_free(&error->ref);
9df30794
CW
1990}
1991
1992void i915_destroy_error_state(struct drm_device *dev)
1993{
1994 struct drm_i915_private *dev_priv = dev->dev_private;
1995 struct drm_i915_error_state *error;
6dc0e816 1996 unsigned long flags;
9df30794 1997
99584db3
DV
1998 spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
1999 error = dev_priv->gpu_error.first_error;
2000 dev_priv->gpu_error.first_error = NULL;
2001 spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
9df30794
CW
2002
2003 if (error)
742cbee8 2004 kref_put(&error->ref, i915_error_state_free);
63eeaf38 2005}
3bd3c932
CW
2006#else
2007#define i915_capture_error_state(x)
2008#endif
63eeaf38 2009
35aed2e6 2010static void i915_report_and_clear_eir(struct drm_device *dev)
8a905236
JB
2011{
2012 struct drm_i915_private *dev_priv = dev->dev_private;
bd9854f9 2013 uint32_t instdone[I915_NUM_INSTDONE_REG];
8a905236 2014 u32 eir = I915_READ(EIR);
050ee91f 2015 int pipe, i;
8a905236 2016
35aed2e6
CW
2017 if (!eir)
2018 return;
8a905236 2019
a70491cc 2020 pr_err("render error detected, EIR: 0x%08x\n", eir);
8a905236 2021
bd9854f9
BW
2022 i915_get_extra_instdone(dev, instdone);
2023
8a905236
JB
2024 if (IS_G4X(dev)) {
2025 if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) {
2026 u32 ipeir = I915_READ(IPEIR_I965);
2027
a70491cc
JP
2028 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2029 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
050ee91f
BW
2030 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2031 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
a70491cc 2032 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
a70491cc 2033 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
8a905236 2034 I915_WRITE(IPEIR_I965, ipeir);
3143a2bf 2035 POSTING_READ(IPEIR_I965);
8a905236
JB
2036 }
2037 if (eir & GM45_ERROR_PAGE_TABLE) {
2038 u32 pgtbl_err = I915_READ(PGTBL_ER);
a70491cc
JP
2039 pr_err("page table error\n");
2040 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
8a905236 2041 I915_WRITE(PGTBL_ER, pgtbl_err);
3143a2bf 2042 POSTING_READ(PGTBL_ER);
8a905236
JB
2043 }
2044 }
2045
a6c45cf0 2046 if (!IS_GEN2(dev)) {
8a905236
JB
2047 if (eir & I915_ERROR_PAGE_TABLE) {
2048 u32 pgtbl_err = I915_READ(PGTBL_ER);
a70491cc
JP
2049 pr_err("page table error\n");
2050 pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err);
8a905236 2051 I915_WRITE(PGTBL_ER, pgtbl_err);
3143a2bf 2052 POSTING_READ(PGTBL_ER);
8a905236
JB
2053 }
2054 }
2055
2056 if (eir & I915_ERROR_MEMORY_REFRESH) {
a70491cc 2057 pr_err("memory refresh error:\n");
9db4a9c7 2058 for_each_pipe(pipe)
a70491cc 2059 pr_err("pipe %c stat: 0x%08x\n",
9db4a9c7 2060 pipe_name(pipe), I915_READ(PIPESTAT(pipe)));
8a905236
JB
2061 /* pipestat has already been acked */
2062 }
2063 if (eir & I915_ERROR_INSTRUCTION) {
a70491cc
JP
2064 pr_err("instruction error\n");
2065 pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM));
050ee91f
BW
2066 for (i = 0; i < ARRAY_SIZE(instdone); i++)
2067 pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]);
a6c45cf0 2068 if (INTEL_INFO(dev)->gen < 4) {
8a905236
JB
2069 u32 ipeir = I915_READ(IPEIR);
2070
a70491cc
JP
2071 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR));
2072 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR));
a70491cc 2073 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD));
8a905236 2074 I915_WRITE(IPEIR, ipeir);
3143a2bf 2075 POSTING_READ(IPEIR);
8a905236
JB
2076 } else {
2077 u32 ipeir = I915_READ(IPEIR_I965);
2078
a70491cc
JP
2079 pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965));
2080 pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965));
a70491cc 2081 pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS));
a70491cc 2082 pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965));
8a905236 2083 I915_WRITE(IPEIR_I965, ipeir);
3143a2bf 2084 POSTING_READ(IPEIR_I965);
8a905236
JB
2085 }
2086 }
2087
2088 I915_WRITE(EIR, eir);
3143a2bf 2089 POSTING_READ(EIR);
8a905236
JB
2090 eir = I915_READ(EIR);
2091 if (eir) {
2092 /*
2093 * some errors might have become stuck,
2094 * mask them.
2095 */
2096 DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir);
2097 I915_WRITE(EMR, I915_READ(EMR) | eir);
2098 I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2099 }
35aed2e6
CW
2100}
2101
2102/**
2103 * i915_handle_error - handle an error interrupt
2104 * @dev: drm device
2105 *
2106 * Do some basic checking of register state at error interrupt time and
2107 * dump it to the syslog. Also call i915_capture_error_state() to make
2108 * sure we get a record and make it available in debugfs. Fire a uevent
2109 * so userspace knows something bad happened (should trigger collection
2110 * of a ring dump etc.).
2111 */
527f9e90 2112void i915_handle_error(struct drm_device *dev, bool wedged)
35aed2e6
CW
2113{
2114 struct drm_i915_private *dev_priv = dev->dev_private;
b4519513
CW
2115 struct intel_ring_buffer *ring;
2116 int i;
35aed2e6
CW
2117
2118 i915_capture_error_state(dev);
2119 i915_report_and_clear_eir(dev);
8a905236 2120
ba1234d1 2121 if (wedged) {
f69061be
DV
2122 atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
2123 &dev_priv->gpu_error.reset_counter);
ba1234d1 2124
11ed50ec 2125 /*
1f83fee0
DV
2126 * Wakeup waiting processes so that the reset work item
2127 * doesn't deadlock trying to grab various locks.
11ed50ec 2128 */
b4519513
CW
2129 for_each_ring(ring, dev_priv, i)
2130 wake_up_all(&ring->irq_queue);
11ed50ec
BG
2131 }
2132
99584db3 2133 queue_work(dev_priv->wq, &dev_priv->gpu_error.work);
8a905236
JB
2134}
2135
21ad8330 2136static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe)
4e5359cd
SF
2137{
2138 drm_i915_private_t *dev_priv = dev->dev_private;
2139 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
2140 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
05394f39 2141 struct drm_i915_gem_object *obj;
4e5359cd
SF
2142 struct intel_unpin_work *work;
2143 unsigned long flags;
2144 bool stall_detected;
2145
2146 /* Ignore early vblank irqs */
2147 if (intel_crtc == NULL)
2148 return;
2149
2150 spin_lock_irqsave(&dev->event_lock, flags);
2151 work = intel_crtc->unpin_work;
2152
e7d841ca
CW
2153 if (work == NULL ||
2154 atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE ||
2155 !work->enable_stall_check) {
4e5359cd
SF
2156 /* Either the pending flip IRQ arrived, or we're too early. Don't check */
2157 spin_unlock_irqrestore(&dev->event_lock, flags);
2158 return;
2159 }
2160
2161 /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
05394f39 2162 obj = work->pending_flip_obj;
a6c45cf0 2163 if (INTEL_INFO(dev)->gen >= 4) {
9db4a9c7 2164 int dspsurf = DSPSURF(intel_crtc->plane);
446f2545
AR
2165 stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) ==
2166 obj->gtt_offset;
4e5359cd 2167 } else {
9db4a9c7 2168 int dspaddr = DSPADDR(intel_crtc->plane);
05394f39 2169 stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
01f2c773 2170 crtc->y * crtc->fb->pitches[0] +
4e5359cd
SF
2171 crtc->x * crtc->fb->bits_per_pixel/8);
2172 }
2173
2174 spin_unlock_irqrestore(&dev->event_lock, flags);
2175
2176 if (stall_detected) {
2177 DRM_DEBUG_DRIVER("Pageflip stall detected\n");
2178 intel_prepare_page_flip(dev, intel_crtc->plane);
2179 }
2180}
2181
42f52ef8
KP
2182/* Called from drm generic code, passed 'crtc' which
2183 * we use as a pipe index
2184 */
f71d4af4 2185static int i915_enable_vblank(struct drm_device *dev, int pipe)
0a3e67a4
JB
2186{
2187 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
e9d21d7f 2188 unsigned long irqflags;
71e0ffa5 2189
5eddb70b 2190 if (!i915_pipe_enabled(dev, pipe))
71e0ffa5 2191 return -EINVAL;
0a3e67a4 2192
1ec14ad3 2193 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
f796cf8f 2194 if (INTEL_INFO(dev)->gen >= 4)
7c463586
KP
2195 i915_enable_pipestat(dev_priv, pipe,
2196 PIPE_START_VBLANK_INTERRUPT_ENABLE);
e9d21d7f 2197 else
7c463586
KP
2198 i915_enable_pipestat(dev_priv, pipe,
2199 PIPE_VBLANK_INTERRUPT_ENABLE);
8692d00e
CW
2200
2201 /* maintain vblank delivery even in deep C-states */
2202 if (dev_priv->info->gen == 3)
6b26c86d 2203 I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS));
1ec14ad3 2204 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
8692d00e 2205
0a3e67a4
JB
2206 return 0;
2207}
2208
f71d4af4 2209static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
f796cf8f
JB
2210{
2211 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2212 unsigned long irqflags;
2213
2214 if (!i915_pipe_enabled(dev, pipe))
2215 return -EINVAL;
2216
2217 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2218 ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
0206e353 2219 DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
f796cf8f
JB
2220 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2221
2222 return 0;
2223}
2224
f71d4af4 2225static int ivybridge_enable_vblank(struct drm_device *dev, int pipe)
b1f14ad0
JB
2226{
2227 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2228 unsigned long irqflags;
2229
2230 if (!i915_pipe_enabled(dev, pipe))
2231 return -EINVAL;
2232
2233 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
b615b57a
CW
2234 ironlake_enable_display_irq(dev_priv,
2235 DE_PIPEA_VBLANK_IVB << (5 * pipe));
b1f14ad0
JB
2236 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2237
2238 return 0;
2239}
2240
7e231dbe
JB
2241static int valleyview_enable_vblank(struct drm_device *dev, int pipe)
2242{
2243 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2244 unsigned long irqflags;
31acc7f5 2245 u32 imr;
7e231dbe
JB
2246
2247 if (!i915_pipe_enabled(dev, pipe))
2248 return -EINVAL;
2249
2250 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
7e231dbe 2251 imr = I915_READ(VLV_IMR);
31acc7f5 2252 if (pipe == 0)
7e231dbe 2253 imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
31acc7f5 2254 else
7e231dbe 2255 imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
7e231dbe 2256 I915_WRITE(VLV_IMR, imr);
31acc7f5
JB
2257 i915_enable_pipestat(dev_priv, pipe,
2258 PIPE_START_VBLANK_INTERRUPT_ENABLE);
7e231dbe
JB
2259 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2260
2261 return 0;
2262}
2263
42f52ef8
KP
2264/* Called from drm generic code, passed 'crtc' which
2265 * we use as a pipe index
2266 */
f71d4af4 2267static void i915_disable_vblank(struct drm_device *dev, int pipe)
0a3e67a4
JB
2268{
2269 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
e9d21d7f 2270 unsigned long irqflags;
0a3e67a4 2271
1ec14ad3 2272 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
8692d00e 2273 if (dev_priv->info->gen == 3)
6b26c86d 2274 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS));
8692d00e 2275
f796cf8f
JB
2276 i915_disable_pipestat(dev_priv, pipe,
2277 PIPE_VBLANK_INTERRUPT_ENABLE |
2278 PIPE_START_VBLANK_INTERRUPT_ENABLE);
2279 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2280}
2281
f71d4af4 2282static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
f796cf8f
JB
2283{
2284 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2285 unsigned long irqflags;
2286
2287 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
2288 ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
0206e353 2289 DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
1ec14ad3 2290 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
0a3e67a4
JB
2291}
2292
f71d4af4 2293static void ivybridge_disable_vblank(struct drm_device *dev, int pipe)
b1f14ad0
JB
2294{
2295 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2296 unsigned long irqflags;
2297
2298 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
b615b57a
CW
2299 ironlake_disable_display_irq(dev_priv,
2300 DE_PIPEA_VBLANK_IVB << (pipe * 5));
b1f14ad0
JB
2301 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2302}
2303
7e231dbe
JB
2304static void valleyview_disable_vblank(struct drm_device *dev, int pipe)
2305{
2306 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2307 unsigned long irqflags;
31acc7f5 2308 u32 imr;
7e231dbe
JB
2309
2310 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
31acc7f5
JB
2311 i915_disable_pipestat(dev_priv, pipe,
2312 PIPE_START_VBLANK_INTERRUPT_ENABLE);
7e231dbe 2313 imr = I915_READ(VLV_IMR);
31acc7f5 2314 if (pipe == 0)
7e231dbe 2315 imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
31acc7f5 2316 else
7e231dbe 2317 imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
7e231dbe 2318 I915_WRITE(VLV_IMR, imr);
7e231dbe
JB
2319 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
2320}
2321
893eead0
CW
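/* Seqno of the most recently emitted request on this ring. */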
2322static u32
2323ring_last_seqno(struct intel_ring_buffer *ring)
852835f3 2324{
893eead0
CW
2325 return list_entry(ring->request_list.prev,
2326 struct drm_i915_gem_request, list)->seqno;
2327}
2328
9107e9d2
CW
2329static bool
2330ring_idle(struct intel_ring_buffer *ring, u32 seqno)
2331{
2332 return (list_empty(&ring->request_list) ||
2333 i915_seqno_passed(seqno, ring_last_seqno(ring)));
f65d9421
BG
2334}
2335
6274f212
CW
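/* If the ring is blocked on an MI_SEMAPHORE_MBOX wait, return the ring
 * expected to signal it and the seqno being waited upon; NULL otherwise. */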
2336static struct intel_ring_buffer *
2337semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno)
a24a11e6
CW
2338{
2339 struct drm_i915_private *dev_priv = ring->dev->dev_private;
6274f212 2340 u32 cmd, ipehr, acthd, acthd_min;
a24a11e6
CW
2341
2342 ipehr = I915_READ(RING_IPEHR(ring->mmio_base));
2343 if ((ipehr & ~(0x3 << 16)) !=
2344 (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER))
6274f212 2345 return NULL;
a24a11e6
CW
2346
2347 /* ACTHD is likely pointing to the dword after the actual command,
2348 * so scan backwards until we find the MBOX.
2349 */
6274f212 2350 acthd = intel_ring_get_active_head(ring) & HEAD_ADDR;
a24a11e6
CW
2351 acthd_min = max((int)acthd - 3 * 4, 0);
2352 do {
2353 cmd = ioread32(ring->virtual_start + acthd);
2354 if (cmd == ipehr)
2355 break;
2356
2357 acthd -= 4;
2358 if (acthd < acthd_min)
6274f212 2359 return NULL;
a24a11e6
CW
2360 } while (1);
2361
6274f212
CW
2362 *seqno = ioread32(ring->virtual_start+acthd+4)+1;
2363 return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3];
a24a11e6
CW
2364}
2365
6274f212
CW
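/* Check whether the semaphore this ring is waiting on has already been
 * signalled; returns -1 if a deadlock is suspected. */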
2366static int semaphore_passed(struct intel_ring_buffer *ring)
2367{
2368 struct drm_i915_private *dev_priv = ring->dev->dev_private;
2369 struct intel_ring_buffer *signaller;
2370 u32 seqno, ctl;
2371
2372 ring->hangcheck.deadlock = true;
2373
2374 signaller = semaphore_waits_for(ring, &seqno);
2375 if (signaller == NULL || signaller->hangcheck.deadlock)
2376 return -1;
2377
2378 /* cursory check for an unkickable deadlock */
2379 ctl = I915_READ_CTL(signaller);
2380 if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
2381 return -1;
2382
2383 return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno);
2384}
2385
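/* Reset the per-ring deadlock markers before each hangcheck pass. */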
2386static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
2387{
2388 struct intel_ring_buffer *ring;
2389 int i;
2390
2391 for_each_ring(ring, dev_priv, i)
2392 ring->hangcheck.deadlock = false;
2393}
2394
ad8beaea
MK
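/* Decide what to do with a busy ring whose seqno has not advanced:
 * still active (ACTHD moving), kickable (stuck on a wait or semaphore),
 * or genuinely hung. */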
2395static enum intel_ring_hangcheck_action
2396ring_stuck(struct intel_ring_buffer *ring, u32 acthd)
1ec14ad3
CW
2397{
2398 struct drm_device *dev = ring->dev;
2399 struct drm_i915_private *dev_priv = dev->dev_private;
9107e9d2
CW
2400 u32 tmp;
2401
6274f212
CW
2402 if (ring->hangcheck.acthd != acthd)
2403 return active;
2404
9107e9d2 2405 if (IS_GEN2(dev))
6274f212 2406 return hung;
9107e9d2
CW
2407
2408 /* Is the chip hanging on a WAIT_FOR_EVENT?
2409 * If so we can simply poke the RB_WAIT bit
2410 * and break the hang. This should work on
2411 * all but the second generation chipsets.
2412 */
2413 tmp = I915_READ_CTL(ring);
1ec14ad3
CW
2414 if (tmp & RING_WAIT) {
2415 DRM_ERROR("Kicking stuck wait on %s\n",
2416 ring->name);
2417 I915_WRITE_CTL(ring, tmp);
6274f212
CW
2418 return kick;
2419 }
2420
2421 if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) {
2422 switch (semaphore_passed(ring)) {
2423 default:
2424 return hung;
2425 case 1:
2426 DRM_ERROR("Kicking stuck semaphore on %s\n",
2427 ring->name);
2428 I915_WRITE_CTL(ring, tmp);
2429 return kick;
2430 case 0:
2431 return wait;
2432 }
9107e9d2 2433 }
ed5cbb03 2434
6274f212 2435 return hung;
ed5cbb03
MK
2436}
2437
f65d9421
BG
2438/**
2439 * This is called when the chip hasn't reported back with completed
05407ff8
MK
2440 * batchbuffers in a long time. We keep track of per-ring seqno progress and,
2441 * if there is no progress, the hangcheck score for that ring is increased.
2442 * Further, acthd is inspected to see if the ring is stuck. In the stuck case
2443 * we kick the ring. If we see no progress on three subsequent calls
2444 * we assume the chip is wedged and try to fix it by resetting the chip.
f65d9421
BG
2445 */
2446void i915_hangcheck_elapsed(unsigned long data)
2447{
2448 struct drm_device *dev = (struct drm_device *)data;
2449 drm_i915_private_t *dev_priv = dev->dev_private;
b4519513 2450 struct intel_ring_buffer *ring;
b4519513 2451 int i;
05407ff8 2452 int busy_count = 0, rings_hung = 0;
9107e9d2
CW
2453 bool stuck[I915_NUM_RINGS] = { 0 };
2454#define BUSY 1
2455#define KICK 5
2456#define HUNG 20
2457#define FIRE 30
893eead0 2458
3e0dc6b0
BW
2459 if (!i915_enable_hangcheck)
2460 return;
2461
b4519513 2462 for_each_ring(ring, dev_priv, i) {
05407ff8 2463 u32 seqno, acthd;
9107e9d2 2464 bool busy = true;
05407ff8 2465
6274f212
CW
2466 semaphore_clear_deadlocks(dev_priv);
2467
05407ff8
MK
2468 seqno = ring->get_seqno(ring, false);
2469 acthd = intel_ring_get_active_head(ring);
b4519513 2470
9107e9d2
CW
2471 if (ring->hangcheck.seqno == seqno) {
2472 if (ring_idle(ring, seqno)) {
2473 if (waitqueue_active(&ring->irq_queue)) {
2474 /* Issue a wake-up to catch stuck h/w. */
2475 DRM_ERROR("Hangcheck timer elapsed... %s idle\n",
2476 ring->name);
2477 wake_up_all(&ring->irq_queue);
2478 ring->hangcheck.score += HUNG;
2479 } else
2480 busy = false;
05407ff8 2481 } else {
9107e9d2
CW
2482 int score;
2483
6274f212
CW
2484 /* We always increment the hangcheck score
2485 * if the ring is busy and still processing
2486 * the same request, so that no single request
2487 * can run indefinitely (such as a chain of
2488 * batches). The only time we do not increment
2489 * the hangcheck score on this ring is if this
2490 * ring is in a legitimate wait for another
2491 * ring. In that case the waiting ring is a
2492 * victim and we want to be sure we catch the
2493 * right culprit. Then every time we do kick
2494 * the ring, add a small increment to the
2495 * score so that we can catch a batch that is
2496 * being repeatedly kicked and so responsible
2497 * for stalling the machine.
2498 */
ad8beaea
MK
2499 ring->hangcheck.action = ring_stuck(ring,
2500 acthd);
2501
2502 switch (ring->hangcheck.action) {
6274f212
CW
2503 case wait:
2504 score = 0;
2505 break;
2506 case active:
9107e9d2 2507 score = BUSY;
6274f212
CW
2508 break;
2509 case kick:
2510 score = KICK;
2511 break;
2512 case hung:
2513 score = HUNG;
2514 stuck[i] = true;
2515 break;
2516 }
9107e9d2 2517 ring->hangcheck.score += score;
05407ff8 2518 }
9107e9d2
CW
2519 } else {
2520 /* Gradually reduce the count so that we catch DoS
2521 * attempts across multiple batches.
2522 */
2523 if (ring->hangcheck.score > 0)
2524 ring->hangcheck.score--;
d1e61e7f
CW
2525 }
2526
05407ff8
MK
2527 ring->hangcheck.seqno = seqno;
2528 ring->hangcheck.acthd = acthd;
9107e9d2 2529 busy_count += busy;
893eead0 2530 }
b9201c14 2531
92cab734 2532 for_each_ring(ring, dev_priv, i) {
9107e9d2 2533 if (ring->hangcheck.score > FIRE) {
acd78c11 2534 DRM_ERROR("%s on %s\n",
05407ff8 2535 stuck[i] ? "stuck" : "no progress",
a43adf07
CW
2536 ring->name);
2537 rings_hung++;
92cab734
MK
2538 }
2539 }
2540
05407ff8
MK
2541 if (rings_hung)
2542 return i915_handle_error(dev, true);
f65d9421 2543
05407ff8
MK
2544 if (busy_count)
2545 /* Reset timer in case the chip hangs without another request
2546 * being added */
2547 mod_timer(&dev_priv->gpu_error.hangcheck_timer,
2548 round_jiffies_up(jiffies +
2549 DRM_I915_HANGCHECK_JIFFIES));
f65d9421
BG
2550}
2551
91738a95
PZ
2552static void ibx_irq_preinstall(struct drm_device *dev)
2553{
2554 struct drm_i915_private *dev_priv = dev->dev_private;
2555
2556 if (HAS_PCH_NOP(dev))
2557 return;
2558
2559 /* south display irq */
2560 I915_WRITE(SDEIMR, 0xffffffff);
2561 /*
2562 * SDEIER is also touched by the interrupt handler to work around missed
2563 * PCH interrupts. Hence we can't update it after the interrupt handler
2564 * is enabled - instead we unconditionally enable all PCH interrupt
2565 * sources here, but then only unmask them as needed with SDEIMR.
2566 */
2567 I915_WRITE(SDEIER, 0xffffffff);
2568 POSTING_READ(SDEIER);
2569}
2570
1da177e4
LT
2571/* drm_dma.h hooks */
f71d4af4 2573static void ironlake_irq_preinstall(struct drm_device *dev)
036a4a7d
ZW
2574{
2575 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2576
4697995b
JB
2577 atomic_set(&dev_priv->irq_received, 0);
2578
036a4a7d 2579 I915_WRITE(HWSTAM, 0xeffe);
bdfcdb63 2580
036a4a7d
ZW
2581 /* XXX hotplug from PCH */
2582
2583 I915_WRITE(DEIMR, 0xffffffff);
2584 I915_WRITE(DEIER, 0x0);
3143a2bf 2585 POSTING_READ(DEIER);
036a4a7d
ZW
2586
2587 /* and GT */
2588 I915_WRITE(GTIMR, 0xffffffff);
2589 I915_WRITE(GTIER, 0x0);
3143a2bf 2590 POSTING_READ(GTIER);
c650156a 2591
91738a95 2592 ibx_irq_preinstall(dev);
7d99163d
BW
2593}
2594
2595static void ivybridge_irq_preinstall(struct drm_device *dev)
2596{
2597 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2598
2599 atomic_set(&dev_priv->irq_received, 0);
2600
2601 I915_WRITE(HWSTAM, 0xeffe);
2602
2603 /* XXX hotplug from PCH */
2604
2605 I915_WRITE(DEIMR, 0xffffffff);
2606 I915_WRITE(DEIER, 0x0);
2607 POSTING_READ(DEIER);
2608
2609 /* and GT */
2610 I915_WRITE(GTIMR, 0xffffffff);
2611 I915_WRITE(GTIER, 0x0);
2612 POSTING_READ(GTIER);
2613
eda63ffb
BW
2614 /* Power management */
2615 I915_WRITE(GEN6_PMIMR, 0xffffffff);
2616 I915_WRITE(GEN6_PMIER, 0x0);
2617 POSTING_READ(GEN6_PMIER);
2618
91738a95 2619 ibx_irq_preinstall(dev);
036a4a7d
ZW
2620}
2621
7e231dbe
JB
2622static void valleyview_irq_preinstall(struct drm_device *dev)
2623{
2624 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2625 int pipe;
2626
2627 atomic_set(&dev_priv->irq_received, 0);
2628
7e231dbe
JB
2629 /* VLV magic */
2630 I915_WRITE(VLV_IMR, 0);
2631 I915_WRITE(RING_IMR(RENDER_RING_BASE), 0);
2632 I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0);
2633 I915_WRITE(RING_IMR(BLT_RING_BASE), 0);
2634
7e231dbe
JB
2635 /* and GT */
2636 I915_WRITE(GTIIR, I915_READ(GTIIR));
2637 I915_WRITE(GTIIR, I915_READ(GTIIR));
2638 I915_WRITE(GTIMR, 0xffffffff);
2639 I915_WRITE(GTIER, 0x0);
2640 POSTING_READ(GTIER);
2641
2642 I915_WRITE(DPINVGTT, 0xff);
2643
2644 I915_WRITE(PORT_HOTPLUG_EN, 0);
2645 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2646 for_each_pipe(pipe)
2647 I915_WRITE(PIPESTAT(pipe), 0xffff);
2648 I915_WRITE(VLV_IIR, 0xffffffff);
2649 I915_WRITE(VLV_IMR, 0xffffffff);
2650 I915_WRITE(VLV_IER, 0x0);
2651 POSTING_READ(VLV_IER);
2652}
2653
82a28bcf 2654static void ibx_hpd_irq_setup(struct drm_device *dev)
7fe0b973
KP
2655{
2656 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
82a28bcf
DV
2657 struct drm_mode_config *mode_config = &dev->mode_config;
2658 struct intel_encoder *intel_encoder;
2659 u32 mask = ~I915_READ(SDEIMR);
2660 u32 hotplug;
2661
2662 if (HAS_PCH_IBX(dev)) {
995e6b3d 2663 mask &= ~SDE_HOTPLUG_MASK;
82a28bcf 2664 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
cd569aed
EE
2665 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
2666 mask |= hpd_ibx[intel_encoder->hpd_pin];
82a28bcf 2667 } else {
995e6b3d 2668 mask &= ~SDE_HOTPLUG_MASK_CPT;
82a28bcf 2669 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
cd569aed
EE
2670 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
2671 mask |= hpd_cpt[intel_encoder->hpd_pin];
82a28bcf 2672 }
7fe0b973 2673
82a28bcf
DV
2674 I915_WRITE(SDEIMR, ~mask);
2675
2676 /*
2677 * Enable digital hotplug on the PCH, and configure the DP short pulse
2678 * duration to 2ms (which is the minimum in the Display Port spec)
2679 *
2680 * This register is the same on all known PCH chips.
2681 */
7fe0b973
KP
2682 hotplug = I915_READ(PCH_PORT_HOTPLUG);
2683 hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
2684 hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
2685 hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
2686 hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
2687 I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
2688}
2689
d46da437
PZ
2690static void ibx_irq_postinstall(struct drm_device *dev)
2691{
2692 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
82a28bcf 2693 u32 mask;
e5868a31 2694
692a04cf
DV
2695 if (HAS_PCH_NOP(dev))
2696 return;
2697
8664281b
PZ
2698 if (HAS_PCH_IBX(dev)) {
2699 mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER |
de032bf4 2700 SDE_TRANSA_FIFO_UNDER | SDE_POISON;
8664281b
PZ
2701 } else {
2702 mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT;
2703
2704 I915_WRITE(SERR_INT, I915_READ(SERR_INT));
2705 }
ab5c608b 2706
d46da437
PZ
2707 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
2708 I915_WRITE(SDEIMR, ~mask);
d46da437
PZ
2709}
2710
f71d4af4 2711static int ironlake_irq_postinstall(struct drm_device *dev)
036a4a7d 2712{
4bc9d430
DV
2713 unsigned long irqflags;
2714
036a4a7d
ZW
2715 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2716 /* enable kind of interrupts always enabled */
013d5aa2 2717 u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
ce99c256 2718 DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
8664281b 2719 DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN |
de032bf4 2720 DE_PIPEA_FIFO_UNDERRUN | DE_POISON;
cc609d5d 2721 u32 gt_irqs;
036a4a7d 2722
1ec14ad3 2723 dev_priv->irq_mask = ~display_mask;
036a4a7d
ZW
2724
2725 /* should always be able to generate irqs */
2726 I915_WRITE(DEIIR, I915_READ(DEIIR));
1ec14ad3
CW
2727 I915_WRITE(DEIMR, dev_priv->irq_mask);
2728 I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
3143a2bf 2729 POSTING_READ(DEIER);
036a4a7d 2730
1ec14ad3 2731 dev_priv->gt_irq_mask = ~0;
036a4a7d
ZW
2732
2733 I915_WRITE(GTIIR, I915_READ(GTIIR));
1ec14ad3 2734 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
881f47b6 2735
cc609d5d
BW
2736 gt_irqs = GT_RENDER_USER_INTERRUPT;
2737
1ec14ad3 2738 if (IS_GEN6(dev))
cc609d5d 2739 gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT;
1ec14ad3 2740 else
cc609d5d
BW
2741 gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT |
2742 ILK_BSD_USER_INTERRUPT;
2743
2744 I915_WRITE(GTIER, gt_irqs);
3143a2bf 2745 POSTING_READ(GTIER);
036a4a7d 2746
d46da437 2747 ibx_irq_postinstall(dev);
7fe0b973 2748
f97108d1
JB
2749 if (IS_IRONLAKE_M(dev)) {
2750 /* Clear & enable PCU event interrupts */
2751 I915_WRITE(DEIIR, DE_PCU_EVENT);
2752 I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
4bc9d430
DV
2753
2754 /* spinlocking not required here for correctness since interrupt
2755 * setup is guaranteed to run in single-threaded context. But we
2756 * need it to make the assert_spin_locked happy. */
2757 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
f97108d1 2758 ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
4bc9d430 2759 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
f97108d1
JB
2760 }
2761
036a4a7d
ZW
2762 return 0;
2763}
2764
f71d4af4 2765static int ivybridge_irq_postinstall(struct drm_device *dev)
b1f14ad0
JB
2766{
2767 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2768 /* enable kind of interrupts always enabled */
b615b57a
CW
2769 u32 display_mask =
2770 DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB |
2771 DE_PLANEC_FLIP_DONE_IVB |
2772 DE_PLANEB_FLIP_DONE_IVB |
ce99c256 2773 DE_PLANEA_FLIP_DONE_IVB |
8664281b
PZ
2774 DE_AUX_CHANNEL_A_IVB |
2775 DE_ERR_INT_IVB;
12638c57 2776 u32 pm_irqs = GEN6_PM_RPS_EVENTS;
cc609d5d 2777 u32 gt_irqs;
b1f14ad0 2778
b1f14ad0
JB
2779 dev_priv->irq_mask = ~display_mask;
2780
2781 /* should always be able to generate irqs */
8664281b 2782 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
b1f14ad0
JB
2783 I915_WRITE(DEIIR, I915_READ(DEIIR));
2784 I915_WRITE(DEIMR, dev_priv->irq_mask);
b615b57a
CW
2785 I915_WRITE(DEIER,
2786 display_mask |
2787 DE_PIPEC_VBLANK_IVB |
2788 DE_PIPEB_VBLANK_IVB |
2789 DE_PIPEA_VBLANK_IVB);
b1f14ad0
JB
2790 POSTING_READ(DEIER);
2791
cc609d5d 2792 dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
b1f14ad0
JB
2793
2794 I915_WRITE(GTIIR, I915_READ(GTIIR));
2795 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
2796
cc609d5d
BW
2797 gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT |
2798 GT_BLT_USER_INTERRUPT | GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2799 I915_WRITE(GTIER, gt_irqs);
b1f14ad0
JB
2800 POSTING_READ(GTIER);
2801
12638c57
BW
2802 I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
2803 if (HAS_VEBOX(dev))
2804 pm_irqs |= PM_VEBOX_USER_INTERRUPT |
2805 PM_VEBOX_CS_ERROR_INTERRUPT;
2806
2807 /* Our enable/disable rps functions may touch these registers so
2808 * make sure to set a known state for only the non-RPS bits.
2809 * The RMW is extra paranoia since this should be called after being set
2810 * to a known state in preinstall.
2811 */
2812 I915_WRITE(GEN6_PMIMR,
2813 (I915_READ(GEN6_PMIMR) | ~GEN6_PM_RPS_EVENTS) & ~pm_irqs);
2814 I915_WRITE(GEN6_PMIER,
2815 (I915_READ(GEN6_PMIER) & GEN6_PM_RPS_EVENTS) | pm_irqs);
2816 POSTING_READ(GEN6_PMIER);
eda63ffb 2817
d46da437 2818 ibx_irq_postinstall(dev);
7fe0b973 2819
b1f14ad0
JB
2820 return 0;
2821}
2822
7e231dbe
JB
2823static int valleyview_irq_postinstall(struct drm_device *dev)
2824{
2825 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
cc609d5d 2826 u32 gt_irqs;
7e231dbe 2827 u32 enable_mask;
31acc7f5 2828 u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV;
7e231dbe
JB
2829
2830 enable_mask = I915_DISPLAY_PORT_INTERRUPT;
31acc7f5
JB
2831 enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2832 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2833 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
7e231dbe
JB
2834 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
2835
31acc7f5
JB
2836 /*
2837 * Leave vblank interrupts masked initially. enable/disable will
2838 * toggle them based on usage.
2839 */
2840 dev_priv->irq_mask = (~enable_mask) |
2841 I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT |
2842 I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
7e231dbe 2843
20afbda2
DV
2844 I915_WRITE(PORT_HOTPLUG_EN, 0);
2845 POSTING_READ(PORT_HOTPLUG_EN);
2846
7e231dbe
JB
2847 I915_WRITE(VLV_IMR, dev_priv->irq_mask);
2848 I915_WRITE(VLV_IER, enable_mask);
2849 I915_WRITE(VLV_IIR, 0xffffffff);
2850 I915_WRITE(PIPESTAT(0), 0xffff);
2851 I915_WRITE(PIPESTAT(1), 0xffff);
2852 POSTING_READ(VLV_IER);
2853
31acc7f5 2854 i915_enable_pipestat(dev_priv, 0, pipestat_enable);
515ac2bb 2855 i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
31acc7f5
JB
2856 i915_enable_pipestat(dev_priv, 1, pipestat_enable);
2857
7e231dbe
JB
2858 I915_WRITE(VLV_IIR, 0xffffffff);
2859 I915_WRITE(VLV_IIR, 0xffffffff);
2860
7e231dbe 2861 I915_WRITE(GTIIR, I915_READ(GTIIR));
31acc7f5 2862 I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
3bcedbe5 2863
cc609d5d
BW
2864 gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT |
2865 GT_BLT_USER_INTERRUPT;
2866 I915_WRITE(GTIER, gt_irqs);
7e231dbe
JB
2867 POSTING_READ(GTIER);
2868
2869 /* ack & enable invalid PTE error interrupts */
2870#if 0 /* FIXME: add support to irq handler for checking these bits */
2871 I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK);
2872 I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK);
2873#endif
2874
2875 I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE);
20afbda2
DV
2876
2877 return 0;
2878}
2879
7e231dbe
JB
2880static void valleyview_irq_uninstall(struct drm_device *dev)
2881{
2882 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2883 int pipe;
2884
2885 if (!dev_priv)
2886 return;
2887
ac4c16c5
EE
2888 del_timer_sync(&dev_priv->hotplug_reenable_timer);
2889
7e231dbe
JB
2890 for_each_pipe(pipe)
2891 I915_WRITE(PIPESTAT(pipe), 0xffff);
2892
2893 I915_WRITE(HWSTAM, 0xffffffff);
2894 I915_WRITE(PORT_HOTPLUG_EN, 0);
2895 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
2896 for_each_pipe(pipe)
2897 I915_WRITE(PIPESTAT(pipe), 0xffff);
2898 I915_WRITE(VLV_IIR, 0xffffffff);
2899 I915_WRITE(VLV_IMR, 0xffffffff);
2900 I915_WRITE(VLV_IER, 0x0);
2901 POSTING_READ(VLV_IER);
2902}
2903
f71d4af4 2904static void ironlake_irq_uninstall(struct drm_device *dev)
036a4a7d
ZW
2905{
2906 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
4697995b
JB
2907
2908 if (!dev_priv)
2909 return;
2910
ac4c16c5
EE
2911 del_timer_sync(&dev_priv->hotplug_reenable_timer);
2912
036a4a7d
ZW
2913 I915_WRITE(HWSTAM, 0xffffffff);
2914
2915 I915_WRITE(DEIMR, 0xffffffff);
2916 I915_WRITE(DEIER, 0x0);
2917 I915_WRITE(DEIIR, I915_READ(DEIIR));
8664281b
PZ
2918 if (IS_GEN7(dev))
2919 I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
036a4a7d
ZW
2920
2921 I915_WRITE(GTIMR, 0xffffffff);
2922 I915_WRITE(GTIER, 0x0);
2923 I915_WRITE(GTIIR, I915_READ(GTIIR));
192aac1f 2924
ab5c608b
BW
2925 if (HAS_PCH_NOP(dev))
2926 return;
2927
192aac1f
KP
2928 I915_WRITE(SDEIMR, 0xffffffff);
2929 I915_WRITE(SDEIER, 0x0);
2930 I915_WRITE(SDEIIR, I915_READ(SDEIIR));
8664281b
PZ
2931 if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev))
2932 I915_WRITE(SERR_INT, I915_READ(SERR_INT));
036a4a7d
ZW
2933}
2934
a266c7d5 2935static void i8xx_irq_preinstall(struct drm_device * dev)
1da177e4
LT
2936{
2937 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
9db4a9c7 2938 int pipe;
91e3738e 2939
a266c7d5 2940 atomic_set(&dev_priv->irq_received, 0);
5ca58282 2941
9db4a9c7
JB
2942 for_each_pipe(pipe)
2943 I915_WRITE(PIPESTAT(pipe), 0);
a266c7d5
CW
2944 I915_WRITE16(IMR, 0xffff);
2945 I915_WRITE16(IER, 0x0);
2946 POSTING_READ16(IER);
c2798b19
CW
2947}
2948
2949static int i8xx_irq_postinstall(struct drm_device *dev)
2950{
2951 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
2952
c2798b19
CW
2953 I915_WRITE16(EMR,
2954 ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
2955
2956 /* Unmask the interrupts that we always want on. */
2957 dev_priv->irq_mask =
2958 ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2959 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2960 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
2961 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
2962 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
2963 I915_WRITE16(IMR, dev_priv->irq_mask);
2964
2965 I915_WRITE16(IER,
2966 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
2967 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
2968 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
2969 I915_USER_INTERRUPT);
2970 POSTING_READ16(IER);
2971
2972 return 0;
2973}
2974
90a72f87
VS
2975/*
2976 * Returns true when a page flip has completed.
2977 */
2978static bool i8xx_handle_vblank(struct drm_device *dev,
2979 int pipe, u16 iir)
2980{
2981 drm_i915_private_t *dev_priv = dev->dev_private;
2982 u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe);
2983
2984 if (!drm_handle_vblank(dev, pipe))
2985 return false;
2986
2987 if ((iir & flip_pending) == 0)
2988 return false;
2989
2990 intel_prepare_page_flip(dev, pipe);
2991
2992 /* We detect FlipDone by looking for the change in PendingFlip from '1'
2993 * to '0' on the following vblank, i.e. IIR has the Pendingflip
2994 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
2995 * the flip is completed (no longer pending). Since this doesn't raise
2996 * an interrupt per se, we watch for the change at vblank.
2997 */
2998 if (I915_READ16(ISR) & flip_pending)
2999 return false;
3000
3001 intel_finish_page_flip(dev, pipe);
3002
3003 return true;
3004}
3005
ff1f525e 3006static irqreturn_t i8xx_irq_handler(int irq, void *arg)
c2798b19
CW
3007{
3008 struct drm_device *dev = (struct drm_device *) arg;
3009 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
c2798b19
CW
3010 u16 iir, new_iir;
3011 u32 pipe_stats[2];
3012 unsigned long irqflags;
3013 int irq_received;
3014 int pipe;
3015 u16 flip_mask =
3016 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3017 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
3018
3019 atomic_inc(&dev_priv->irq_received);
3020
3021 iir = I915_READ16(IIR);
3022 if (iir == 0)
3023 return IRQ_NONE;
3024
3025 while (iir & ~flip_mask) {
3026 /* Can't rely on pipestat interrupt bit in iir as it might
3027 * have been cleared after the pipestat interrupt was received.
3028 * It doesn't set the bit in iir again, but it still produces
3029 * interrupts (for non-MSI).
3030 */
3031 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3032 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3033 i915_handle_error(dev, false);
3034
3035 for_each_pipe(pipe) {
3036 int reg = PIPESTAT(pipe);
3037 pipe_stats[pipe] = I915_READ(reg);
3038
3039 /*
3040 * Clear the PIPE*STAT regs before the IIR
3041 */
3042 if (pipe_stats[pipe] & 0x8000ffff) {
3043 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3044 DRM_DEBUG_DRIVER("pipe %c underrun\n",
3045 pipe_name(pipe));
3046 I915_WRITE(reg, pipe_stats[pipe]);
3047 irq_received = 1;
3048 }
3049 }
3050 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3051
3052 I915_WRITE16(IIR, iir & ~flip_mask);
3053 new_iir = I915_READ16(IIR); /* Flush posted writes */
3054
d05c617e 3055 i915_update_dri1_breadcrumb(dev);
c2798b19
CW
3056
3057 if (iir & I915_USER_INTERRUPT)
3058 notify_ring(dev, &dev_priv->ring[RCS]);
3059
3060 if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS &&
90a72f87
VS
3061 i8xx_handle_vblank(dev, 0, iir))
3062 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0);
c2798b19
CW
3063
3064 if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS &&
90a72f87
VS
3065 i8xx_handle_vblank(dev, 1, iir))
3066 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1);
c2798b19
CW
3067
3068 iir = new_iir;
3069 }
3070
3071 return IRQ_HANDLED;
3072}
3073
3074static void i8xx_irq_uninstall(struct drm_device * dev)
3075{
3076 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3077 int pipe;
3078
c2798b19
CW
3079 for_each_pipe(pipe) {
3080 /* Clear enable bits; then clear status bits */
3081 I915_WRITE(PIPESTAT(pipe), 0);
3082 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3083 }
3084 I915_WRITE16(IMR, 0xffff);
3085 I915_WRITE16(IER, 0x0);
3086 I915_WRITE16(IIR, I915_READ16(IIR));
3087}
3088
a266c7d5
CW
3089static void i915_irq_preinstall(struct drm_device * dev)
3090{
3091 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3092 int pipe;
3093
3094 atomic_set(&dev_priv->irq_received, 0);
3095
3096 if (I915_HAS_HOTPLUG(dev)) {
3097 I915_WRITE(PORT_HOTPLUG_EN, 0);
3098 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3099 }
3100
00d98ebd 3101 I915_WRITE16(HWSTAM, 0xeffe);
a266c7d5
CW
3102 for_each_pipe(pipe)
3103 I915_WRITE(PIPESTAT(pipe), 0);
3104 I915_WRITE(IMR, 0xffffffff);
3105 I915_WRITE(IER, 0x0);
3106 POSTING_READ(IER);
3107}
3108
3109static int i915_irq_postinstall(struct drm_device *dev)
3110{
3111 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
38bde180 3112 u32 enable_mask;
a266c7d5 3113
38bde180
CW
3114 I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH));
3115
3116 /* Unmask the interrupts that we always want on. */
3117 dev_priv->irq_mask =
3118 ~(I915_ASLE_INTERRUPT |
3119 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3120 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3121 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3122 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3123 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3124
3125 enable_mask =
3126 I915_ASLE_INTERRUPT |
3127 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3128 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3129 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT |
3130 I915_USER_INTERRUPT;
3131
a266c7d5 3132 if (I915_HAS_HOTPLUG(dev)) {
20afbda2
DV
3133 I915_WRITE(PORT_HOTPLUG_EN, 0);
3134 POSTING_READ(PORT_HOTPLUG_EN);
3135
a266c7d5
CW
3136 /* Enable in IER... */
3137 enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
3138 /* and unmask in IMR */
3139 dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
3140 }
3141
a266c7d5
CW
3142 I915_WRITE(IMR, dev_priv->irq_mask);
3143 I915_WRITE(IER, enable_mask);
3144 POSTING_READ(IER);
3145
f49e38dd 3146 i915_enable_asle_pipestat(dev);
20afbda2
DV
3147
3148 return 0;
3149}
3150
90a72f87
VS
3151/*
3152 * Returns true when a page flip has completed.
3153 */
3154static bool i915_handle_vblank(struct drm_device *dev,
3155 int plane, int pipe, u32 iir)
3156{
3157 drm_i915_private_t *dev_priv = dev->dev_private;
3158 u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane);
3159
3160 if (!drm_handle_vblank(dev, pipe))
3161 return false;
3162
3163 if ((iir & flip_pending) == 0)
3164 return false;
3165
3166 intel_prepare_page_flip(dev, plane);
3167
3168 /* We detect FlipDone by looking for the change in PendingFlip from '1'
3169 * to '0' on the following vblank, i.e. IIR has the Pendingflip
3170 * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
3171 * the flip is completed (no longer pending). Since this doesn't raise
3172 * an interrupt per se, we watch for the change at vblank.
3173 */
3174 if (I915_READ(ISR) & flip_pending)
3175 return false;
3176
3177 intel_finish_page_flip(dev, pipe);
3178
3179 return true;
3180}
3181
ff1f525e 3182static irqreturn_t i915_irq_handler(int irq, void *arg)
a266c7d5
CW
3183{
3184 struct drm_device *dev = (struct drm_device *) arg;
3185 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
8291ee90 3186 u32 iir, new_iir, pipe_stats[I915_MAX_PIPES];
a266c7d5 3187 unsigned long irqflags;
38bde180
CW
3188 u32 flip_mask =
3189 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3190 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
38bde180 3191 int pipe, ret = IRQ_NONE;
a266c7d5
CW
3192
3193 atomic_inc(&dev_priv->irq_received);
3194
3195 iir = I915_READ(IIR);
38bde180
CW
3196 do {
3197 bool irq_received = (iir & ~flip_mask) != 0;
8291ee90 3198 bool blc_event = false;
a266c7d5
CW
3199
3200 /* Can't rely on pipestat interrupt bit in iir as it might
3201 * have been cleared after the pipestat interrupt was received.
3202 * It doesn't set the bit in iir again, but it still produces
3203 * interrupts (for non-MSI).
3204 */
3205 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3206 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3207 i915_handle_error(dev, false);
3208
3209 for_each_pipe(pipe) {
3210 int reg = PIPESTAT(pipe);
3211 pipe_stats[pipe] = I915_READ(reg);
3212
38bde180 3213 /* Clear the PIPE*STAT regs before the IIR */
a266c7d5
CW
3214 if (pipe_stats[pipe] & 0x8000ffff) {
3215 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3216 DRM_DEBUG_DRIVER("pipe %c underrun\n",
3217 pipe_name(pipe));
3218 I915_WRITE(reg, pipe_stats[pipe]);
38bde180 3219 irq_received = true;
a266c7d5
CW
3220 }
3221 }
3222 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3223
3224 if (!irq_received)
3225 break;
3226
a266c7d5
CW
3227 /* Consume port. Then clear IIR or we'll miss events */
3228 if ((I915_HAS_HOTPLUG(dev)) &&
3229 (iir & I915_DISPLAY_PORT_INTERRUPT)) {
3230 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
b543fb04 3231 u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;
a266c7d5
CW
3232
3233 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
3234 hotplug_status);
b543fb04 3235 if (hotplug_trigger) {
cd569aed
EE
3236 if (hotplug_irq_storm_detect(dev, hotplug_trigger, hpd_status_i915))
3237 i915_hpd_irq_setup(dev);
a266c7d5
CW
3238 queue_work(dev_priv->wq,
3239 &dev_priv->hotplug_work);
b543fb04 3240 }
a266c7d5 3241 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
38bde180 3242 POSTING_READ(PORT_HOTPLUG_STAT);
a266c7d5
CW
3243 }
3244
38bde180 3245 I915_WRITE(IIR, iir & ~flip_mask);
a266c7d5
CW
3246 new_iir = I915_READ(IIR); /* Flush posted writes */
3247
a266c7d5
CW
3248 if (iir & I915_USER_INTERRUPT)
3249 notify_ring(dev, &dev_priv->ring[RCS]);
a266c7d5 3250
a266c7d5 3251 for_each_pipe(pipe) {
38bde180
CW
3252 int plane = pipe;
3253 if (IS_MOBILE(dev))
3254 plane = !plane;
90a72f87 3255
8291ee90 3256 if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS &&
90a72f87
VS
3257 i915_handle_vblank(dev, plane, pipe, iir))
3258 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane);
a266c7d5
CW
3259
3260 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
3261 blc_event = true;
3262 }
3263
a266c7d5
CW
3264 if (blc_event || (iir & I915_ASLE_INTERRUPT))
3265 intel_opregion_asle_intr(dev);
3266
3267 /* With MSI, interrupts are only generated when iir
3268 * transitions from zero to nonzero. If another bit got
3269 * set while we were handling the existing iir bits, then
3270 * we would never get another interrupt.
3271 *
3272 * This is fine on non-MSI as well, as if we hit this path
3273 * we avoid exiting the interrupt handler only to generate
3274 * another one.
3275 *
3276 * Note that for MSI this could cause a stray interrupt report
3277 * if an interrupt landed in the time between writing IIR and
3278 * the posting read. This should be rare enough to never
3279 * trigger the 99% of 100,000 interrupts test for disabling
3280 * stray interrupts.
3281 */
38bde180 3282 ret = IRQ_HANDLED;
a266c7d5 3283 iir = new_iir;
38bde180 3284 } while (iir & ~flip_mask);
a266c7d5 3285
d05c617e 3286 i915_update_dri1_breadcrumb(dev);
8291ee90 3287
a266c7d5 3288 return ret;
3289}
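/*
 * Editor's note (annotation, not in the upstream file): in the loop above,
 * pending page-flip bits are excluded from the "did we receive anything"
 * test via flip_mask; once i915_handle_vblank() reports a completed flip
 * for a plane, its DISPLAY_PLANE_FLIP_PENDING bit is cleared from
 * flip_mask. IIR is written back (minus flip_mask) before being re-read,
 * so any bit that latched while the handler ran is picked up on the next
 * iteration rather than being lost.
 */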
3290
3291static void i915_irq_uninstall(struct drm_device * dev)
3292{
3293 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3294 int pipe;
3295
ac4c16c5 3296 del_timer_sync(&dev_priv->hotplug_reenable_timer);
3297
a266c7d5 3298 if (I915_HAS_HOTPLUG(dev)) {
3299 I915_WRITE(PORT_HOTPLUG_EN, 0);
3300 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
3301 }
3302
00d98ebd 3303 I915_WRITE16(HWSTAM, 0xffff);
55b39755 3304 for_each_pipe(pipe) {
3305 /* Clear enable bits; then clear status bits */
a266c7d5 3306 I915_WRITE(PIPESTAT(pipe), 0);
55b39755 3307 I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe)));
3308 }
a266c7d5 3309 I915_WRITE(IMR, 0xffffffff);
3310 I915_WRITE(IER, 0x0);
3311
a266c7d5 3312 I915_WRITE(IIR, I915_READ(IIR));
3313}
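/*
 * Editor's note (annotation, not in the upstream file): the teardown order
 * above appears deliberate - hotplug and PIPESTAT enable bits are cleared
 * before the sticky status bits are written back, IMR is fully masked and
 * IER zeroed, and IIR is acked last with its own contents, minimising the
 * window in which a new event could re-latch between the writes.
 */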
3314
3315static void i965_irq_preinstall(struct drm_device * dev)
3316{
3317 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3318 int pipe;
3319
3320 atomic_set(&dev_priv->irq_received, 0);
3321
adca4730 3322 I915_WRITE(PORT_HOTPLUG_EN, 0);
3323 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
a266c7d5 3324
3325 I915_WRITE(HWSTAM, 0xeffe);
3326 for_each_pipe(pipe)
3327 I915_WRITE(PIPESTAT(pipe), 0);
3328 I915_WRITE(IMR, 0xffffffff);
3329 I915_WRITE(IER, 0x0);
3330 POSTING_READ(IER);
3331}
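/*
 * Editor's note (annotation, not in the upstream file): preinstall only
 * quiesces the hardware - sources are masked via HWSTAM and IMR, IER is
 * zeroed and the hotplug/PIPESTAT enables cleared - so, as far as I can
 * tell, nothing should fire between the IRQ line being requested and
 * i965_irq_postinstall() programming the real enable masks below.
 */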
3332
3333static int i965_irq_postinstall(struct drm_device *dev)
3334{
3335 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
bbba0a97 3336 u32 enable_mask;
a266c7d5 3337 u32 error_mask;
3338
a266c7d5 3339 /* Unmask the interrupts that we always want on. */
bbba0a97 3340 dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT |
adca4730 3341 I915_DISPLAY_PORT_INTERRUPT |
bbba0a97 3342 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |
3343 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |
3344 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3345 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
3346 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT);
3347
3348 enable_mask = ~dev_priv->irq_mask;
21ad8330 3349 enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3350 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT);
bbba0a97 3351 enable_mask |= I915_USER_INTERRUPT;
3352
3353 if (IS_G4X(dev))
3354 enable_mask |= I915_BSD_USER_INTERRUPT;
a266c7d5 3355
515ac2bb 3356 i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE);
a266c7d5 3357
a266c7d5 3358 /*
3359 * Enable some error detection, note the instruction error mask
3360 * bit is reserved, so we leave it masked.
3361 */
3362 if (IS_G4X(dev)) {
3363 error_mask = ~(GM45_ERROR_PAGE_TABLE |
3364 GM45_ERROR_MEM_PRIV |
3365 GM45_ERROR_CP_PRIV |
3366 I915_ERROR_MEMORY_REFRESH);
3367 } else {
3368 error_mask = ~(I915_ERROR_PAGE_TABLE |
3369 I915_ERROR_MEMORY_REFRESH);
3370 }
3371 I915_WRITE(EMR, error_mask);
3372
3373 I915_WRITE(IMR, dev_priv->irq_mask);
3374 I915_WRITE(IER, enable_mask);
3375 POSTING_READ(IER);
3376
20afbda2 3377 I915_WRITE(PORT_HOTPLUG_EN, 0);
3378 POSTING_READ(PORT_HOTPLUG_EN);
3379
f49e38dd 3380 i915_enable_asle_pipestat(dev);
20afbda2 3381
3382 return 0;
3383}
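/*
 * Editor's note (sketch, not from the upstream file): on these parts IMR
 * gates which status bits latch into IIR, while IER selects which latched
 * bits actually raise the interrupt line - roughly "reported = IIR & IER".
 * The plane-flip-pending bits are therefore left unmasked (visible in IIR
 * for the handler to inspect) but excluded from enable_mask here, so they
 * do not generate interrupts on their own.
 */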
3384
bac56d5b 3385static void i915_hpd_irq_setup(struct drm_device *dev)
20afbda2 3386{
3387 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
e5868a31 3388 struct drm_mode_config *mode_config = &dev->mode_config;
cd569aed 3389 struct intel_encoder *intel_encoder;
20afbda2 3390 u32 hotplug_en;
3391
bac56d5b 3392 if (I915_HAS_HOTPLUG(dev)) {
3393 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
3394 hotplug_en &= ~HOTPLUG_INT_EN_MASK;
3395 /* Note HDMI and DP share hotplug bits */
e5868a31 3396 /* enable bits are the same for all generations */
cd569aed 3397 list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head)
3398 if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED)
3399 hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin];
bac56d5b 3400 /* Programming the CRT detection parameters tends
3401 to generate a spurious hotplug event about three
3402 seconds later. So just do it once.
3403 */
3404 if (IS_G4X(dev))
3405 hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
85fc95ba 3406 hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK;
bac56d5b 3407 hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
a266c7d5 3408
bac56d5b 3409 /* Ignore TV since it's buggy */
3410 I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
3411 }
a266c7d5 3412}
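/*
 * Editor's note (annotation, not in the upstream file): PORT_HOTPLUG_EN is
 * rebuilt from scratch here, adding an enable bit only for pins whose
 * hpd_mark is HPD_ENABLED. Pins that the storm detector has moved to
 * HPD_DISABLED therefore stay off until i915_reenable_hotplug_timer_func()
 * marks them enabled again and re-runs this hpd_irq_setup hook.
 */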
3413
ff1f525e 3414static irqreturn_t i965_irq_handler(int irq, void *arg)
a266c7d5 3415{
3416 struct drm_device *dev = (struct drm_device *) arg;
3417 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
a266c7d5 3418 u32 iir, new_iir;
3419 u32 pipe_stats[I915_MAX_PIPES];
a266c7d5 3420 unsigned long irqflags;
3421 int irq_received;
3422 int ret = IRQ_NONE, pipe;
21ad8330 3423 u32 flip_mask =
3424 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
3425 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
a266c7d5 3426
3427 atomic_inc(&dev_priv->irq_received);
3428
3429 iir = I915_READ(IIR);
3430
a266c7d5 3431 for (;;) {
2c8ba29f 3432 bool blc_event = false;
3433
21ad8330 3434 irq_received = (iir & ~flip_mask) != 0;
a266c7d5 3435
3436 /* Can't rely on pipestat interrupt bit in iir as it might
3437 * have been cleared after the pipestat interrupt was received.
3438 * It doesn't set the bit in iir again, but it still produces
3439 * interrupts (for non-MSI).
3440 */
3441 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3442 if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
3443 i915_handle_error(dev, false);
3444
3445 for_each_pipe(pipe) {
3446 int reg = PIPESTAT(pipe);
3447 pipe_stats[pipe] = I915_READ(reg);
3448
3449 /*
3450 * Clear the PIPE*STAT regs before the IIR
3451 */
3452 if (pipe_stats[pipe] & 0x8000ffff) {
3453 if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
3454 DRM_DEBUG_DRIVER("pipe %c underrun\n",
3455 pipe_name(pipe));
3456 I915_WRITE(reg, pipe_stats[pipe]);
3457 irq_received = 1;
3458 }
3459 }
3460 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3461
3462 if (!irq_received)
3463 break;
3464
3465 ret = IRQ_HANDLED;
3466
3467 /* Consume port. Then clear IIR or we'll miss events */
adca4730 3468 if (iir & I915_DISPLAY_PORT_INTERRUPT) {
a266c7d5 3469 u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
b543fb04 3470 u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ?
3471 HOTPLUG_INT_STATUS_G4X :
4f7fd709 3472 HOTPLUG_INT_STATUS_I915);
a266c7d5 3473
3474 DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
3475 hotplug_status);
b543fb04 3476 if (hotplug_trigger) {
cd569aed 3477 if (hotplug_irq_storm_detect(dev, hotplug_trigger,
4f7fd709 3478 IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915))
cd569aed 3479 i915_hpd_irq_setup(dev);
a266c7d5 3480 queue_work(dev_priv->wq,
3481 &dev_priv->hotplug_work);
b543fb04 3482 }
a266c7d5 3483 I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
3484 I915_READ(PORT_HOTPLUG_STAT);
3485 }
3486
21ad8330 3487 I915_WRITE(IIR, iir & ~flip_mask);
a266c7d5 3488 new_iir = I915_READ(IIR); /* Flush posted writes */
3489
a266c7d5 3490 if (iir & I915_USER_INTERRUPT)
3491 notify_ring(dev, &dev_priv->ring[RCS]);
3492 if (iir & I915_BSD_USER_INTERRUPT)
3493 notify_ring(dev, &dev_priv->ring[VCS]);
3494
a266c7d5 3495 for_each_pipe(pipe) {
2c8ba29f 3496 if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS &&
90a72f87 3497 i915_handle_vblank(dev, pipe, pipe, iir))
3498 flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe);
a266c7d5 3499
3500 if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
3501 blc_event = true;
3502 }
3503
3504
3505 if (blc_event || (iir & I915_ASLE_INTERRUPT))
3506 intel_opregion_asle_intr(dev);
3507
515ac2bb 3508 if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
3509 gmbus_irq_handler(dev);
3510
a266c7d5 3511 /* With MSI, interrupts are only generated when iir
3512 * transitions from zero to nonzero. If another bit got
3513 * set while we were handling the existing iir bits, then
3514 * we would never get another interrupt.
3515 *
3516 * This is fine on non-MSI as well, as if we hit this path
3517 * we avoid exiting the interrupt handler only to generate
3518 * another one.
3519 *
3520 * Note that for MSI this could cause a stray interrupt report
3521 * if an interrupt landed in the time between writing IIR and
3522 * the posting read. This should be rare enough to never
3523 * trigger the 99% of 100,000 interrupts test for disabling
3524 * stray interrupts.
3525 */
3526 iir = new_iir;
3527 }
3528
d05c617e 3529 i915_update_dri1_breadcrumb(dev);
2c8ba29f 3530
a266c7d5 3531 return ret;
3532}
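/*
 * Editor's note (annotation, not in the upstream file): this is the
 * gen4/G4X variant of the handler - compared with i915_irq_handler() it
 * additionally notifies the BSD ring, handles the GMBUS status bit, and
 * picks the hotplug trigger/status tables with IS_G4X(); the IIR and
 * PIPESTAT sequencing is otherwise the same.
 */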
3533
3534static void i965_irq_uninstall(struct drm_device * dev)
3535{
3536 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
3537 int pipe;
3538
3539 if (!dev_priv)
3540 return;
3541
ac4c16c5 3542 del_timer_sync(&dev_priv->hotplug_reenable_timer);
3543
adca4730 3544 I915_WRITE(PORT_HOTPLUG_EN, 0);
3545 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
a266c7d5 3546
3547 I915_WRITE(HWSTAM, 0xffffffff);
3548 for_each_pipe(pipe)
3549 I915_WRITE(PIPESTAT(pipe), 0);
3550 I915_WRITE(IMR, 0xffffffff);
3551 I915_WRITE(IER, 0x0);
3552
3553 for_each_pipe(pipe)
3554 I915_WRITE(PIPESTAT(pipe),
3555 I915_READ(PIPESTAT(pipe)) & 0x8000ffff);
3556 I915_WRITE(IIR, I915_READ(IIR));
3557}
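/*
 * Editor's note (annotation, not in the upstream file): the final PIPESTAT
 * pass writes back only the 0x8000ffff status bits, which are
 * write-1-to-clear, after the enable bits in the register have already
 * been zeroed by the first loop.
 */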
3558
ac4c16c5 3559 static void i915_reenable_hotplug_timer_func(unsigned long data)
3560{
3561 drm_i915_private_t *dev_priv = (drm_i915_private_t *)data;
3562 struct drm_device *dev = dev_priv->dev;
3563 struct drm_mode_config *mode_config = &dev->mode_config;
3564 unsigned long irqflags;
3565 int i;
3566
3567 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3568 for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) {
3569 struct drm_connector *connector;
3570
3571 if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED)
3572 continue;
3573
3574 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
3575
3576 list_for_each_entry(connector, &mode_config->connector_list, head) {
3577 struct intel_connector *intel_connector = to_intel_connector(connector);
3578
3579 if (intel_connector->encoder->hpd_pin == i) {
3580 if (connector->polled != intel_connector->polled)
3581 DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
3582 drm_get_connector_name(connector));
3583 connector->polled = intel_connector->polled;
3584 if (!connector->polled)
3585 connector->polled = DRM_CONNECTOR_POLL_HPD;
3586 }
3587 }
3588 }
3589 if (dev_priv->display.hpd_irq_setup)
3590 dev_priv->display.hpd_irq_setup(dev);
3591 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3592}
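/*
 * Editor's note (annotation, not in the upstream file): this timer is the
 * back half of the hotplug storm handling - under irq_lock it moves any
 * HPD_DISABLED pin back to HPD_ENABLED, restores the connector's polling
 * mode, and re-runs the platform hpd_irq_setup hook so the pin's interrupt
 * is turned back on.
 */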
3593
f71d4af4 3594 void intel_irq_init(struct drm_device *dev)
3595{
8b2e326d 3596 struct drm_i915_private *dev_priv = dev->dev_private;
3597
3598 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
99584db3 3599 INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
c6a828d3 3600 INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
a4da4fa4 3601 INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
8b2e326d 3602
99584db3 3603 setup_timer(&dev_priv->gpu_error.hangcheck_timer,
3604 i915_hangcheck_elapsed,
61bac78e 3605 (unsigned long) dev);
ac4c16c5 3606 setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func,
3607 (unsigned long) dev_priv);
61bac78e 3608
97a19a24 3609 pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
9ee32fea 3610
f71d4af4 3611 dev->driver->get_vblank_counter = i915_get_vblank_counter;
3612 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
7d4e146f 3613 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) {
f71d4af4 3614 dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
3615 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
3616 }
3617
c3613de9 3618 if (drm_core_check_feature(dev, DRIVER_MODESET))
3619 dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp;
3620 else
3621 dev->driver->get_vblank_timestamp = NULL;
f71d4af4 3622 dev->driver->get_scanout_position = i915_get_crtc_scanoutpos;
3623
7e231dbe 3624 if (IS_VALLEYVIEW(dev)) {
3625 dev->driver->irq_handler = valleyview_irq_handler;
3626 dev->driver->irq_preinstall = valleyview_irq_preinstall;
3627 dev->driver->irq_postinstall = valleyview_irq_postinstall;
3628 dev->driver->irq_uninstall = valleyview_irq_uninstall;
3629 dev->driver->enable_vblank = valleyview_enable_vblank;
3630 dev->driver->disable_vblank = valleyview_disable_vblank;
fa00abe0 3631 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
4a06e201 3632 } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
7d99163d 3633 /* Share uninstall handlers with ILK/SNB */
f71d4af4 3634 dev->driver->irq_handler = ivybridge_irq_handler;
7d99163d 3635 dev->driver->irq_preinstall = ivybridge_irq_preinstall;
f71d4af4 3636 dev->driver->irq_postinstall = ivybridge_irq_postinstall;
3637 dev->driver->irq_uninstall = ironlake_irq_uninstall;
3638 dev->driver->enable_vblank = ivybridge_enable_vblank;
3639 dev->driver->disable_vblank = ivybridge_disable_vblank;
82a28bcf 3640 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
f71d4af4 3641 } else if (HAS_PCH_SPLIT(dev)) {
3642 dev->driver->irq_handler = ironlake_irq_handler;
3643 dev->driver->irq_preinstall = ironlake_irq_preinstall;
3644 dev->driver->irq_postinstall = ironlake_irq_postinstall;
3645 dev->driver->irq_uninstall = ironlake_irq_uninstall;
3646 dev->driver->enable_vblank = ironlake_enable_vblank;
3647 dev->driver->disable_vblank = ironlake_disable_vblank;
82a28bcf 3648 dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup;
f71d4af4 3649 } else {
c2798b19 3650 if (INTEL_INFO(dev)->gen == 2) {
3651 dev->driver->irq_preinstall = i8xx_irq_preinstall;
3652 dev->driver->irq_postinstall = i8xx_irq_postinstall;
3653 dev->driver->irq_handler = i8xx_irq_handler;
3654 dev->driver->irq_uninstall = i8xx_irq_uninstall;
a266c7d5 3655 } else if (INTEL_INFO(dev)->gen == 3) {
3656 dev->driver->irq_preinstall = i915_irq_preinstall;
3657 dev->driver->irq_postinstall = i915_irq_postinstall;
3658 dev->driver->irq_uninstall = i915_irq_uninstall;
3659 dev->driver->irq_handler = i915_irq_handler;
20afbda2 3660 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
c2798b19 3661 } else {
a266c7d5 3662 dev->driver->irq_preinstall = i965_irq_preinstall;
3663 dev->driver->irq_postinstall = i965_irq_postinstall;
3664 dev->driver->irq_uninstall = i965_irq_uninstall;
3665 dev->driver->irq_handler = i965_irq_handler;
bac56d5b 3666 dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup;
c2798b19 3667 }
f71d4af4 3668 dev->driver->enable_vblank = i915_enable_vblank;
3669 dev->driver->disable_vblank = i915_disable_vblank;
3670 }
3671}
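/*
 * Editor's note (annotation, not in the upstream file): intel_irq_init()
 * only fills in function pointers - Valleyview, IVB/HSW, the remaining PCH
 * split platforms (ILK/SNB) and the legacy gen2/3/4 paths each get their
 * own preinstall/postinstall/handler/uninstall and vblank hooks, plus a
 * matching hpd_irq_setup implementation where hotplug is supported.
 */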
20afbda2 3672
3673void intel_hpd_init(struct drm_device *dev)
3674{
3675 struct drm_i915_private *dev_priv = dev->dev_private;
821450c6 3676 struct drm_mode_config *mode_config = &dev->mode_config;
3677 struct drm_connector *connector;
3678 int i;
20afbda2 3679
821450c6 3680 for (i = 1; i < HPD_NUM_PINS; i++) {
3681 dev_priv->hpd_stats[i].hpd_cnt = 0;
3682 dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED;
3683 }
3684 list_for_each_entry(connector, &mode_config->connector_list, head) {
3685 struct intel_connector *intel_connector = to_intel_connector(connector);
3686 connector->polled = intel_connector->polled;
3687 if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE)
3688 connector->polled = DRM_CONNECTOR_POLL_HPD;
3689 }
20afbda2 3690 if (dev_priv->display.hpd_irq_setup)
3691 dev_priv->display.hpd_irq_setup(dev);
3692}
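/*
 * Editor's note (annotation, not in the upstream file): intel_hpd_init()
 * resets every pin's storm counter and marks it HPD_ENABLED, defaults
 * connectors without an explicit polling mode to DRM_CONNECTOR_POLL_HPD
 * when the platform has hotplug support, and then hands off to the
 * hpd_irq_setup hook selected in intel_irq_init().
 */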