| 1 | /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*- |
| 2 | */ |
| 3 | /* |
| 4 | * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. |
| 5 | * All Rights Reserved. |
| 6 | * |
| 7 | * Permission is hereby granted, free of charge, to any person obtaining a |
| 8 | * copy of this software and associated documentation files (the |
| 9 | * "Software"), to deal in the Software without restriction, including |
| 10 | * without limitation the rights to use, copy, modify, merge, publish, |
| 11 | * distribute, sub license, and/or sell copies of the Software, and to |
| 12 | * permit persons to whom the Software is furnished to do so, subject to |
| 13 | * the following conditions: |
| 14 | * |
| 15 | * The above copyright notice and this permission notice (including the |
| 16 | * next paragraph) shall be included in all copies or substantial portions |
| 17 | * of the Software. |
| 18 | * |
| 19 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS |
| 20 | * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
| 21 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. |
| 22 | * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR |
| 23 | * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, |
| 24 | * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE |
| 25 | * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. |
| 26 | * |
| 27 | */ |
| 28 | |
| 29 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
| 30 | |
| 31 | #include <linux/sysrq.h> |
| 32 | #include <linux/slab.h> |
| 33 | #include <drm/drmP.h> |
| 34 | #include <drm/i915_drm.h> |
| 35 | #include "i915_drv.h" |
| 36 | #include "i915_trace.h" |
| 37 | #include "intel_drv.h" |
| 38 | |
| 39 | /* For display hotplug interrupt */ |
| 40 | static void |
| 41 | ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask) |
| 42 | { |
| 43 | if ((dev_priv->irq_mask & mask) != 0) { |
| 44 | dev_priv->irq_mask &= ~mask; |
| 45 | I915_WRITE(DEIMR, dev_priv->irq_mask); |
| 46 | POSTING_READ(DEIMR); |
| 47 | } |
| 48 | } |
| 49 | |
| 50 | static inline void |
| 51 | ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask) |
| 52 | { |
| 53 | if ((dev_priv->irq_mask & mask) != mask) { |
| 54 | dev_priv->irq_mask |= mask; |
| 55 | I915_WRITE(DEIMR, dev_priv->irq_mask); |
| 56 | POSTING_READ(DEIMR); |
| 57 | } |
| 58 | } |
| 59 | |
| 60 | void |
| 61 | i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) |
| 62 | { |
| 63 | if ((dev_priv->pipestat[pipe] & mask) != mask) { |
| 64 | u32 reg = PIPESTAT(pipe); |
| 65 | |
| 66 | dev_priv->pipestat[pipe] |= mask; |
| 67 | /* Enable the interrupt, clear any pending status */ |
| 68 | I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16)); |
| 69 | POSTING_READ(reg); |
| 70 | } |
| 71 | } |
| 72 | |
| 73 | void |
| 74 | i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask) |
| 75 | { |
| 76 | if ((dev_priv->pipestat[pipe] & mask) != 0) { |
| 77 | u32 reg = PIPESTAT(pipe); |
| 78 | |
| 79 | dev_priv->pipestat[pipe] &= ~mask; |
| 80 | I915_WRITE(reg, dev_priv->pipestat[pipe]); |
| 81 | POSTING_READ(reg); |
| 82 | } |
| 83 | } |
| 84 | |
| 85 | /** |
| 86 | * intel_enable_asle - enable ASLE interrupt for OpRegion |
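|  | * @dev: drm device |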
| 87 | */ |
| 88 | void intel_enable_asle(struct drm_device *dev) |
| 89 | { |
| 90 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 91 | unsigned long irqflags; |
| 92 | |
| 93 | /* FIXME: opregion/asle for VLV */ |
| 94 | if (IS_VALLEYVIEW(dev)) |
| 95 | return; |
| 96 | |
| 97 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
| 98 | |
| 99 | if (HAS_PCH_SPLIT(dev)) |
| 100 | ironlake_enable_display_irq(dev_priv, DE_GSE); |
| 101 | else { |
| 102 | i915_enable_pipestat(dev_priv, 1, |
| 103 | PIPE_LEGACY_BLC_EVENT_ENABLE); |
| 104 | if (INTEL_INFO(dev)->gen >= 4) |
| 105 | i915_enable_pipestat(dev_priv, 0, |
| 106 | PIPE_LEGACY_BLC_EVENT_ENABLE); |
| 107 | } |
| 108 | |
| 109 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
| 110 | } |
| 111 | |
| 112 | /** |
| 113 | * i915_pipe_enabled - check if a pipe is enabled |
| 114 | * @dev: DRM device |
| 115 | * @pipe: pipe to check |
| 116 | * |
| 117 | * Reading certain registers when the pipe is disabled can hang the chip. |
| 118 | * Use this routine to make sure the PLL is running and the pipe is active |
| 119 | * before reading such registers if unsure. |
| 120 | */ |
| 121 | static int |
| 122 | i915_pipe_enabled(struct drm_device *dev, int pipe) |
| 123 | { |
| 124 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 125 | enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, |
| 126 | pipe); |
| 127 | |
| 128 | return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_ENABLE; |
| 129 | } |
| 130 | |
| 131 | /* Called from drm generic code, passed a 'crtc', which |
| 132 | * we use as a pipe index |
| 133 | */ |
| 134 | static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) |
| 135 | { |
| 136 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 137 | unsigned long high_frame; |
| 138 | unsigned long low_frame; |
| 139 | u32 high1, high2, low; |
| 140 | |
| 141 | if (!i915_pipe_enabled(dev, pipe)) { |
| 142 | DRM_DEBUG_DRIVER("trying to get vblank count for disabled " |
| 143 | "pipe %c\n", pipe_name(pipe)); |
| 144 | return 0; |
| 145 | } |
| 146 | |
| 147 | high_frame = PIPEFRAME(pipe); |
| 148 | low_frame = PIPEFRAMEPIXEL(pipe); |
| 149 | |
| 150 | /* |
| 151 | * High & low register fields aren't synchronized, so make sure |
| 152 | * we get a low value that's stable across two reads of the high |
| 153 | * register. |
| 154 | */ |
| 155 | do { |
| 156 | high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; |
| 157 | low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK; |
| 158 | high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; |
| 159 | } while (high1 != high2); |
| 160 | |
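|  | /* |
|  | * The frame counter is split across two registers: PIPEFRAME holds the |
|  | * high bits and PIPEFRAMEPIXEL the low 8 bits; stitch them back together |
|  | * into a single count. |
|  | */ |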
| 161 | high1 >>= PIPE_FRAME_HIGH_SHIFT; |
| 162 | low >>= PIPE_FRAME_LOW_SHIFT; |
| 163 | return (high1 << 8) | low; |
| 164 | } |
| 165 | |
| 166 | static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) |
| 167 | { |
| 168 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 169 | int reg = PIPE_FRMCOUNT_GM45(pipe); |
| 170 | |
| 171 | if (!i915_pipe_enabled(dev, pipe)) { |
| 172 | DRM_DEBUG_DRIVER("trying to get vblank count for disabled " |
| 173 | "pipe %c\n", pipe_name(pipe)); |
| 174 | return 0; |
| 175 | } |
| 176 | |
| 177 | return I915_READ(reg); |
| 178 | } |
| 179 | |
| 180 | static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, |
| 181 | int *vpos, int *hpos) |
| 182 | { |
| 183 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 184 | u32 vbl = 0, position = 0; |
| 185 | int vbl_start, vbl_end, htotal, vtotal; |
| 186 | bool in_vbl = true; |
| 187 | int ret = 0; |
| 188 | enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, |
| 189 | pipe); |
| 190 | |
| 191 | if (!i915_pipe_enabled(dev, pipe)) { |
| 192 | DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " |
| 193 | "pipe %c\n", pipe_name(pipe)); |
| 194 | return 0; |
| 195 | } |
| 196 | |
| 197 | /* Get vtotal. */ |
| 198 | vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff); |
| 199 | |
| 200 | if (INTEL_INFO(dev)->gen >= 4) { |
| 201 | /* No obvious pixelcount register. Only query vertical |
| 202 | * scanout position from Display scan line register. |
| 203 | */ |
| 204 | position = I915_READ(PIPEDSL(pipe)); |
| 205 | |
| 206 | /* Decode into vertical scanout position. Don't have |
| 207 | * horizontal scanout position. |
| 208 | */ |
| 209 | *vpos = position & 0x1fff; |
| 210 | *hpos = 0; |
| 211 | } else { |
| 212 | /* Have access to pixelcount since start of frame. |
| 213 | * We can split this into vertical and horizontal |
| 214 | * scanout position. |
| 215 | */ |
| 216 | position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; |
| 217 | |
| 218 | htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff); |
| 219 | *vpos = position / htotal; |
| 220 | *hpos = position - (*vpos * htotal); |
| 221 | } |
| 222 | |
| 223 | /* Query vblank area. */ |
| 224 | vbl = I915_READ(VBLANK(cpu_transcoder)); |
| 225 | |
| 226 | /* Test position against vblank region. */ |
| 227 | vbl_start = vbl & 0x1fff; |
| 228 | vbl_end = (vbl >> 16) & 0x1fff; |
| 229 | |
| 230 | if ((*vpos < vbl_start) || (*vpos > vbl_end)) |
| 231 | in_vbl = false; |
| 232 | |
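|  | /* |
|  | * Positions inside the vblank are reported as negative values, i.e. the |
|  | * number of lines left until the counter wraps to the start of the next |
|  | * frame. |
|  | */ |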
| 233 | /* Inside "upper part" of vblank area? Apply corrective offset: */ |
| 234 | if (in_vbl && (*vpos >= vbl_start)) |
| 235 | *vpos = *vpos - vtotal; |
| 236 | |
| 237 | /* Readouts valid? */ |
| 238 | if (vbl > 0) |
| 239 | ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE; |
| 240 | |
| 241 | /* In vblank? */ |
| 242 | if (in_vbl) |
| 243 | ret |= DRM_SCANOUTPOS_INVBL; |
| 244 | |
| 245 | return ret; |
| 246 | } |
| 247 | |
| 248 | static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe, |
| 249 | int *max_error, |
| 250 | struct timeval *vblank_time, |
| 251 | unsigned flags) |
| 252 | { |
| 253 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 254 | struct drm_crtc *crtc; |
| 255 | |
| 256 | if (pipe < 0 || pipe >= dev_priv->num_pipe) { |
| 257 | DRM_ERROR("Invalid crtc %d\n", pipe); |
| 258 | return -EINVAL; |
| 259 | } |
| 260 | |
| 261 | /* Get drm_crtc to timestamp: */ |
| 262 | crtc = intel_get_crtc_for_pipe(dev, pipe); |
| 263 | if (crtc == NULL) { |
| 264 | DRM_ERROR("Invalid crtc %d\n", pipe); |
| 265 | return -EINVAL; |
| 266 | } |
| 267 | |
| 268 | if (!crtc->enabled) { |
| 269 | DRM_DEBUG_KMS("crtc %d is disabled\n", pipe); |
| 270 | return -EBUSY; |
| 271 | } |
| 272 | |
| 273 | /* Helper routine in DRM core does all the work: */ |
| 274 | return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error, |
| 275 | vblank_time, flags, |
| 276 | crtc); |
| 277 | } |
| 278 | |
| 279 | /* |
| 280 | * Handle hotplug events outside the interrupt handler proper. |
| 281 | */ |
| 282 | static void i915_hotplug_work_func(struct work_struct *work) |
| 283 | { |
| 284 | drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, |
| 285 | hotplug_work); |
| 286 | struct drm_device *dev = dev_priv->dev; |
| 287 | struct drm_mode_config *mode_config = &dev->mode_config; |
| 288 | struct intel_encoder *encoder; |
| 289 | |
| 290 | /* Ignore HPD irqs that arrive before everything is fully set up. */ |
| 291 | if (!dev_priv->enable_hotplug_processing) |
| 292 | return; |
| 293 | |
| 294 | mutex_lock(&mode_config->mutex); |
| 295 | DRM_DEBUG_KMS("running encoder hotplug functions\n"); |
| 296 | |
| 297 | list_for_each_entry(encoder, &mode_config->encoder_list, base.head) |
| 298 | if (encoder->hot_plug) |
| 299 | encoder->hot_plug(encoder); |
| 300 | |
| 301 | mutex_unlock(&mode_config->mutex); |
| 302 | |
| 303 | /* Just fire off a uevent and let userspace tell us what to do */ |
| 304 | drm_helper_hpd_irq_event(dev); |
| 305 | } |
| 306 | |
| 307 | static void ironlake_handle_rps_change(struct drm_device *dev) |
| 308 | { |
| 309 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 310 | u32 busy_up, busy_down, max_avg, min_avg; |
| 311 | u8 new_delay; |
| 312 | unsigned long flags; |
| 313 | |
| 314 | spin_lock_irqsave(&mchdev_lock, flags); |
| 315 | |
| 316 | I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); |
| 317 | |
| 318 | new_delay = dev_priv->ips.cur_delay; |
| 319 | |
| 320 | I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); |
| 321 | busy_up = I915_READ(RCPREVBSYTUPAVG); |
| 322 | busy_down = I915_READ(RCPREVBSYTDNAVG); |
| 323 | max_avg = I915_READ(RCBMAXAVG); |
| 324 | min_avg = I915_READ(RCBMINAVG); |
| 325 | |
| 326 | /* Handle RCS change request from hw */ |
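|  | /* Note: the delay is stepped down towards max_delay when busy and up |
|  | * towards min_delay when idle, i.e. max_delay is the numerically smaller |
|  | * bound of the two. |
|  | */ |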
| 327 | if (busy_up > max_avg) { |
| 328 | if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay) |
| 329 | new_delay = dev_priv->ips.cur_delay - 1; |
| 330 | if (new_delay < dev_priv->ips.max_delay) |
| 331 | new_delay = dev_priv->ips.max_delay; |
| 332 | } else if (busy_down < min_avg) { |
| 333 | if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay) |
| 334 | new_delay = dev_priv->ips.cur_delay + 1; |
| 335 | if (new_delay > dev_priv->ips.min_delay) |
| 336 | new_delay = dev_priv->ips.min_delay; |
| 337 | } |
| 338 | |
| 339 | if (ironlake_set_drps(dev, new_delay)) |
| 340 | dev_priv->ips.cur_delay = new_delay; |
| 341 | |
| 342 | spin_unlock_irqrestore(&mchdev_lock, flags); |
| 343 | |
| 344 | return; |
| 345 | } |
| 346 | |
| 347 | static void notify_ring(struct drm_device *dev, |
| 348 | struct intel_ring_buffer *ring) |
| 349 | { |
| 350 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 351 | |
| 352 | if (ring->obj == NULL) |
| 353 | return; |
| 354 | |
| 355 | trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false)); |
| 356 | |
| 357 | wake_up_all(&ring->irq_queue); |
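|  | /* The ring has signalled, so the GPU is clearly making progress: reset |
|  | * the hangcheck count and push its timer out again. |
|  | */ |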
| 358 | if (i915_enable_hangcheck) { |
| 359 | dev_priv->gpu_error.hangcheck_count = 0; |
| 360 | mod_timer(&dev_priv->gpu_error.hangcheck_timer, |
| 361 | round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); |
| 362 | } |
| 363 | } |
| 364 | |
| 365 | static void gen6_pm_rps_work(struct work_struct *work) |
| 366 | { |
| 367 | drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, |
| 368 | rps.work); |
| 369 | u32 pm_iir, pm_imr; |
| 370 | u8 new_delay; |
| 371 | |
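|  | /* Latch and clear the PM IIR bits accumulated by the interrupt handler, |
|  | * and unmask the PM interrupts again now that they are being serviced. |
|  | */ |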
| 372 | spin_lock_irq(&dev_priv->rps.lock); |
| 373 | pm_iir = dev_priv->rps.pm_iir; |
| 374 | dev_priv->rps.pm_iir = 0; |
| 375 | pm_imr = I915_READ(GEN6_PMIMR); |
| 376 | I915_WRITE(GEN6_PMIMR, 0); |
| 377 | spin_unlock_irq(&dev_priv->rps.lock); |
| 378 | |
| 379 | if ((pm_iir & GEN6_PM_DEFERRED_EVENTS) == 0) |
| 380 | return; |
| 381 | |
| 382 | mutex_lock(&dev_priv->rps.hw_lock); |
| 383 | |
| 384 | if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) |
| 385 | new_delay = dev_priv->rps.cur_delay + 1; |
| 386 | else |
| 387 | new_delay = dev_priv->rps.cur_delay - 1; |
| 388 | |
| 389 | /* sysfs frequency interfaces may have snuck in while servicing the |
| 390 | * interrupt |
| 391 | */ |
| 392 | if (!(new_delay > dev_priv->rps.max_delay || |
| 393 | new_delay < dev_priv->rps.min_delay)) { |
| 394 | gen6_set_rps(dev_priv->dev, new_delay); |
| 395 | } |
| 396 | |
| 397 | mutex_unlock(&dev_priv->rps.hw_lock); |
| 398 | } |
| 399 | |
| 400 | |
| 401 | /** |
| 402 | * ivybridge_parity_work - Workqueue called when a parity error interrupt |
| 403 | * occurred. |
| 404 | * @work: workqueue struct |
| 405 | * |
| 406 | * Doesn't actually do anything except notify userspace. As a consequence of |
| 407 | * this event, userspace should try to remap the bad rows since, statistically, |
| 408 | * the same row is likely to go bad again. |
| 409 | */ |
| 410 | static void ivybridge_parity_work(struct work_struct *work) |
| 411 | { |
| 412 | drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, |
| 413 | l3_parity.error_work); |
| 414 | u32 error_status, row, bank, subbank; |
| 415 | char *parity_event[5]; |
| 416 | uint32_t misccpctl; |
| 417 | unsigned long flags; |
| 418 | |
| 419 | /* We must turn off DOP level clock gating to access the L3 registers. |
| 420 | * In order to prevent a get/put style interface, acquire struct mutex |
| 421 | * any time we access those registers. |
| 422 | */ |
| 423 | mutex_lock(&dev_priv->dev->struct_mutex); |
| 424 | |
| 425 | misccpctl = I915_READ(GEN7_MISCCPCTL); |
| 426 | I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); |
| 427 | POSTING_READ(GEN7_MISCCPCTL); |
| 428 | |
| 429 | error_status = I915_READ(GEN7_L3CDERRST1); |
| 430 | row = GEN7_PARITY_ERROR_ROW(error_status); |
| 431 | bank = GEN7_PARITY_ERROR_BANK(error_status); |
| 432 | subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); |
| 433 | |
| 434 | I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID | |
| 435 | GEN7_L3CDERRST1_ENABLE); |
| 436 | POSTING_READ(GEN7_L3CDERRST1); |
| 437 | |
| 438 | I915_WRITE(GEN7_MISCCPCTL, misccpctl); |
| 439 | |
| 440 | spin_lock_irqsave(&dev_priv->irq_lock, flags); |
| 441 | dev_priv->gt_irq_mask &= ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT; |
| 442 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); |
| 443 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); |
| 444 | |
| 445 | mutex_unlock(&dev_priv->dev->struct_mutex); |
| 446 | |
| 447 | parity_event[0] = "L3_PARITY_ERROR=1"; |
| 448 | parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); |
| 449 | parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); |
| 450 | parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); |
| 451 | parity_event[4] = NULL; |
| 452 | |
| 453 | kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj, |
| 454 | KOBJ_CHANGE, parity_event); |
| 455 | |
| 456 | DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n", |
| 457 | row, bank, subbank); |
| 458 | |
| 459 | kfree(parity_event[3]); |
| 460 | kfree(parity_event[2]); |
| 461 | kfree(parity_event[1]); |
| 462 | } |
| 463 | |
| 464 | static void ivybridge_handle_parity_error(struct drm_device *dev) |
| 465 | { |
| 466 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 467 | unsigned long flags; |
| 468 | |
| 469 | if (!HAS_L3_GPU_CACHE(dev)) |
| 470 | return; |
| 471 | |
| 472 | spin_lock_irqsave(&dev_priv->irq_lock, flags); |
| 473 | dev_priv->gt_irq_mask |= GT_GEN7_L3_PARITY_ERROR_INTERRUPT; |
| 474 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); |
| 475 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); |
| 476 | |
| 477 | queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); |
| 478 | } |
| 479 | |
| 480 | static void snb_gt_irq_handler(struct drm_device *dev, |
| 481 | struct drm_i915_private *dev_priv, |
| 482 | u32 gt_iir) |
| 483 | { |
| 484 | |
| 485 | if (gt_iir & (GEN6_RENDER_USER_INTERRUPT | |
| 486 | GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT)) |
| 487 | notify_ring(dev, &dev_priv->ring[RCS]); |
| 488 | if (gt_iir & GEN6_BSD_USER_INTERRUPT) |
| 489 | notify_ring(dev, &dev_priv->ring[VCS]); |
| 490 | if (gt_iir & GEN6_BLITTER_USER_INTERRUPT) |
| 491 | notify_ring(dev, &dev_priv->ring[BCS]); |
| 492 | |
| 493 | if (gt_iir & (GT_GEN6_BLT_CS_ERROR_INTERRUPT | |
| 494 | GT_GEN6_BSD_CS_ERROR_INTERRUPT | |
| 495 | GT_RENDER_CS_ERROR_INTERRUPT)) { |
| 496 | DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir); |
| 497 | i915_handle_error(dev, false); |
| 498 | } |
| 499 | |
| 500 | if (gt_iir & GT_GEN7_L3_PARITY_ERROR_INTERRUPT) |
| 501 | ivybridge_handle_parity_error(dev); |
| 502 | } |
| 503 | |
| 504 | static void gen6_queue_rps_work(struct drm_i915_private *dev_priv, |
| 505 | u32 pm_iir) |
| 506 | { |
| 507 | unsigned long flags; |
| 508 | |
| 509 | /* |
| 510 | * IIR bits should never already be set because IMR should |
| 511 | * prevent an interrupt from being shown in IIR. Finding a bit already |
| 512 | * set here would mean we had unsafely cleared dev_priv->rps.pm_iir; |
| 513 | * although missing an interrupt of the same type is not a problem, |
| 514 | * it would point to a bug in the masking logic. |
| 515 | * |
| 516 | * The mask bit in IMR is cleared by dev_priv->rps.work. |
| 517 | */ |
| 518 | |
| 519 | spin_lock_irqsave(&dev_priv->rps.lock, flags); |
| 520 | dev_priv->rps.pm_iir |= pm_iir; |
| 521 | I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir); |
| 522 | POSTING_READ(GEN6_PMIMR); |
| 523 | spin_unlock_irqrestore(&dev_priv->rps.lock, flags); |
| 524 | |
| 525 | queue_work(dev_priv->wq, &dev_priv->rps.work); |
| 526 | } |
| 527 | |
| 528 | static void gmbus_irq_handler(struct drm_device *dev) |
| 529 | { |
| 530 | struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 531 | |
| 532 | wake_up_all(&dev_priv->gmbus_wait_queue); |
| 533 | } |
| 534 | |
| 535 | static void dp_aux_irq_handler(struct drm_device *dev) |
| 536 | { |
| 537 | struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 538 | |
| 539 | wake_up_all(&dev_priv->gmbus_wait_queue); |
| 540 | } |
| 541 | |
| 542 | static irqreturn_t valleyview_irq_handler(int irq, void *arg) |
| 543 | { |
| 544 | struct drm_device *dev = (struct drm_device *) arg; |
| 545 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 546 | u32 iir, gt_iir, pm_iir; |
| 547 | irqreturn_t ret = IRQ_NONE; |
| 548 | unsigned long irqflags; |
| 549 | int pipe; |
| 550 | u32 pipe_stats[I915_MAX_PIPES]; |
| 551 | |
| 552 | atomic_inc(&dev_priv->irq_received); |
| 553 | |
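|  | /* Keep draining the display, GT and PM IIRs until all three read back |
|  | * zero, so we don't drop events that arrive while we're in here. |
|  | */ |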
| 554 | while (true) { |
| 555 | iir = I915_READ(VLV_IIR); |
| 556 | gt_iir = I915_READ(GTIIR); |
| 557 | pm_iir = I915_READ(GEN6_PMIIR); |
| 558 | |
| 559 | if (gt_iir == 0 && pm_iir == 0 && iir == 0) |
| 560 | goto out; |
| 561 | |
| 562 | ret = IRQ_HANDLED; |
| 563 | |
| 564 | snb_gt_irq_handler(dev, dev_priv, gt_iir); |
| 565 | |
| 566 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
| 567 | for_each_pipe(pipe) { |
| 568 | int reg = PIPESTAT(pipe); |
| 569 | pipe_stats[pipe] = I915_READ(reg); |
| 570 | |
| 571 | /* |
| 572 | * Clear the PIPE*STAT regs before the IIR |
| 573 | */ |
| 574 | if (pipe_stats[pipe] & 0x8000ffff) { |
| 575 | if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) |
| 576 | DRM_DEBUG_DRIVER("pipe %c underrun\n", |
| 577 | pipe_name(pipe)); |
| 578 | I915_WRITE(reg, pipe_stats[pipe]); |
| 579 | } |
| 580 | } |
| 581 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
| 582 | |
| 583 | for_each_pipe(pipe) { |
| 584 | if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) |
| 585 | drm_handle_vblank(dev, pipe); |
| 586 | |
| 587 | if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) { |
| 588 | intel_prepare_page_flip(dev, pipe); |
| 589 | intel_finish_page_flip(dev, pipe); |
| 590 | } |
| 591 | } |
| 592 | |
| 593 | /* Consume port. Then clear IIR or we'll miss events */ |
| 594 | if (iir & I915_DISPLAY_PORT_INTERRUPT) { |
| 595 | u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); |
| 596 | |
| 597 | DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", |
| 598 | hotplug_status); |
| 599 | if (hotplug_status & dev_priv->hotplug_supported_mask) |
| 600 | queue_work(dev_priv->wq, |
| 601 | &dev_priv->hotplug_work); |
| 602 | |
| 603 | I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); |
| 604 | I915_READ(PORT_HOTPLUG_STAT); |
| 605 | } |
| 606 | |
| 607 | if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) |
| 608 | gmbus_irq_handler(dev); |
| 609 | |
| 610 | if (pm_iir & GEN6_PM_DEFERRED_EVENTS) |
| 611 | gen6_queue_rps_work(dev_priv, pm_iir); |
| 612 | |
| 613 | I915_WRITE(GTIIR, gt_iir); |
| 614 | I915_WRITE(GEN6_PMIIR, pm_iir); |
| 615 | I915_WRITE(VLV_IIR, iir); |
| 616 | } |
| 617 | |
| 618 | out: |
| 619 | return ret; |
| 620 | } |
| 621 | |
| 622 | static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) |
| 623 | { |
| 624 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 625 | int pipe; |
| 626 | |
| 627 | if (pch_iir & SDE_HOTPLUG_MASK) |
| 628 | queue_work(dev_priv->wq, &dev_priv->hotplug_work); |
| 629 | |
| 630 | if (pch_iir & SDE_AUDIO_POWER_MASK) |
| 631 | DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", |
| 632 | (pch_iir & SDE_AUDIO_POWER_MASK) >> |
| 633 | SDE_AUDIO_POWER_SHIFT); |
| 634 | |
| 635 | if (pch_iir & SDE_AUX_MASK) |
| 636 | dp_aux_irq_handler(dev); |
| 637 | |
| 638 | if (pch_iir & SDE_GMBUS) |
| 639 | gmbus_irq_handler(dev); |
| 640 | |
| 641 | if (pch_iir & SDE_AUDIO_HDCP_MASK) |
| 642 | DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); |
| 643 | |
| 644 | if (pch_iir & SDE_AUDIO_TRANS_MASK) |
| 645 | DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); |
| 646 | |
| 647 | if (pch_iir & SDE_POISON) |
| 648 | DRM_ERROR("PCH poison interrupt\n"); |
| 649 | |
| 650 | if (pch_iir & SDE_FDI_MASK) |
| 651 | for_each_pipe(pipe) |
| 652 | DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", |
| 653 | pipe_name(pipe), |
| 654 | I915_READ(FDI_RX_IIR(pipe))); |
| 655 | |
| 656 | if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) |
| 657 | DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); |
| 658 | |
| 659 | if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) |
| 660 | DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); |
| 661 | |
| 662 | if (pch_iir & SDE_TRANSB_FIFO_UNDER) |
| 663 | DRM_DEBUG_DRIVER("PCH transcoder B underrun interrupt\n"); |
| 664 | if (pch_iir & SDE_TRANSA_FIFO_UNDER) |
| 665 | DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n"); |
| 666 | } |
| 667 | |
| 668 | static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) |
| 669 | { |
| 670 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 671 | int pipe; |
| 672 | |
| 673 | if (pch_iir & SDE_HOTPLUG_MASK_CPT) |
| 674 | queue_work(dev_priv->wq, &dev_priv->hotplug_work); |
| 675 | |
| 676 | if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) |
| 677 | DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", |
| 678 | (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> |
| 679 | SDE_AUDIO_POWER_SHIFT_CPT); |
| 680 | |
| 681 | if (pch_iir & SDE_AUX_MASK_CPT) |
| 682 | dp_aux_irq_handler(dev); |
| 683 | |
| 684 | if (pch_iir & SDE_GMBUS_CPT) |
| 685 | gmbus_irq_handler(dev); |
| 686 | |
| 687 | if (pch_iir & SDE_AUDIO_CP_REQ_CPT) |
| 688 | DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); |
| 689 | |
| 690 | if (pch_iir & SDE_AUDIO_CP_CHG_CPT) |
| 691 | DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); |
| 692 | |
| 693 | if (pch_iir & SDE_FDI_MASK_CPT) |
| 694 | for_each_pipe(pipe) |
| 695 | DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", |
| 696 | pipe_name(pipe), |
| 697 | I915_READ(FDI_RX_IIR(pipe))); |
| 698 | } |
| 699 | |
| 700 | static irqreturn_t ivybridge_irq_handler(int irq, void *arg) |
| 701 | { |
| 702 | struct drm_device *dev = (struct drm_device *) arg; |
| 703 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 704 | u32 de_iir, gt_iir, de_ier, pm_iir; |
| 705 | irqreturn_t ret = IRQ_NONE; |
| 706 | int i; |
| 707 | |
| 708 | atomic_inc(&dev_priv->irq_received); |
| 709 | |
| 710 | /* disable master interrupt before clearing iir */ |
| 711 | de_ier = I915_READ(DEIER); |
| 712 | I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); |
| 713 | |
| 714 | gt_iir = I915_READ(GTIIR); |
| 715 | if (gt_iir) { |
| 716 | snb_gt_irq_handler(dev, dev_priv, gt_iir); |
| 717 | I915_WRITE(GTIIR, gt_iir); |
| 718 | ret = IRQ_HANDLED; |
| 719 | } |
| 720 | |
| 721 | de_iir = I915_READ(DEIIR); |
| 722 | if (de_iir) { |
| 723 | if (de_iir & DE_AUX_CHANNEL_A_IVB) |
| 724 | dp_aux_irq_handler(dev); |
| 725 | |
| 726 | if (de_iir & DE_GSE_IVB) |
| 727 | intel_opregion_gse_intr(dev); |
| 728 | |
| 729 | for (i = 0; i < 3; i++) { |
| 730 | if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i))) |
| 731 | drm_handle_vblank(dev, i); |
| 732 | if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) { |
| 733 | intel_prepare_page_flip(dev, i); |
| 734 | intel_finish_page_flip_plane(dev, i); |
| 735 | } |
| 736 | } |
| 737 | |
| 738 | /* check event from PCH */ |
| 739 | if (de_iir & DE_PCH_EVENT_IVB) { |
| 740 | u32 pch_iir = I915_READ(SDEIIR); |
| 741 | |
| 742 | cpt_irq_handler(dev, pch_iir); |
| 743 | |
| 744 | /* clear PCH hotplug event before clearing the CPU irq */ |
| 745 | I915_WRITE(SDEIIR, pch_iir); |
| 746 | } |
| 747 | |
| 748 | I915_WRITE(DEIIR, de_iir); |
| 749 | ret = IRQ_HANDLED; |
| 750 | } |
| 751 | |
| 752 | pm_iir = I915_READ(GEN6_PMIIR); |
| 753 | if (pm_iir) { |
| 754 | if (pm_iir & GEN6_PM_DEFERRED_EVENTS) |
| 755 | gen6_queue_rps_work(dev_priv, pm_iir); |
| 756 | I915_WRITE(GEN6_PMIIR, pm_iir); |
| 757 | ret = IRQ_HANDLED; |
| 758 | } |
| 759 | |
| 760 | I915_WRITE(DEIER, de_ier); |
| 761 | POSTING_READ(DEIER); |
| 762 | |
| 763 | return ret; |
| 764 | } |
| 765 | |
| 766 | static void ilk_gt_irq_handler(struct drm_device *dev, |
| 767 | struct drm_i915_private *dev_priv, |
| 768 | u32 gt_iir) |
| 769 | { |
| 770 | if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY)) |
| 771 | notify_ring(dev, &dev_priv->ring[RCS]); |
| 772 | if (gt_iir & GT_BSD_USER_INTERRUPT) |
| 773 | notify_ring(dev, &dev_priv->ring[VCS]); |
| 774 | } |
| 775 | |
| 776 | static irqreturn_t ironlake_irq_handler(int irq, void *arg) |
| 777 | { |
| 778 | struct drm_device *dev = (struct drm_device *) arg; |
| 779 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 780 | int ret = IRQ_NONE; |
| 781 | u32 de_iir, gt_iir, de_ier, pm_iir; |
| 782 | |
| 783 | atomic_inc(&dev_priv->irq_received); |
| 784 | |
| 785 | /* disable master interrupt before clearing iir */ |
| 786 | de_ier = I915_READ(DEIER); |
| 787 | I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); |
| 788 | POSTING_READ(DEIER); |
| 789 | |
| 790 | de_iir = I915_READ(DEIIR); |
| 791 | gt_iir = I915_READ(GTIIR); |
| 792 | pm_iir = I915_READ(GEN6_PMIIR); |
| 793 | |
| 794 | if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0)) |
| 795 | goto done; |
| 796 | |
| 797 | ret = IRQ_HANDLED; |
| 798 | |
| 799 | if (IS_GEN5(dev)) |
| 800 | ilk_gt_irq_handler(dev, dev_priv, gt_iir); |
| 801 | else |
| 802 | snb_gt_irq_handler(dev, dev_priv, gt_iir); |
| 803 | |
| 804 | if (de_iir & DE_AUX_CHANNEL_A) |
| 805 | dp_aux_irq_handler(dev); |
| 806 | |
| 807 | if (de_iir & DE_GSE) |
| 808 | intel_opregion_gse_intr(dev); |
| 809 | |
| 810 | if (de_iir & DE_PIPEA_VBLANK) |
| 811 | drm_handle_vblank(dev, 0); |
| 812 | |
| 813 | if (de_iir & DE_PIPEB_VBLANK) |
| 814 | drm_handle_vblank(dev, 1); |
| 815 | |
| 816 | if (de_iir & DE_PLANEA_FLIP_DONE) { |
| 817 | intel_prepare_page_flip(dev, 0); |
| 818 | intel_finish_page_flip_plane(dev, 0); |
| 819 | } |
| 820 | |
| 821 | if (de_iir & DE_PLANEB_FLIP_DONE) { |
| 822 | intel_prepare_page_flip(dev, 1); |
| 823 | intel_finish_page_flip_plane(dev, 1); |
| 824 | } |
| 825 | |
| 826 | /* check event from PCH */ |
| 827 | if (de_iir & DE_PCH_EVENT) { |
| 828 | u32 pch_iir = I915_READ(SDEIIR); |
| 829 | |
| 830 | if (HAS_PCH_CPT(dev)) |
| 831 | cpt_irq_handler(dev, pch_iir); |
| 832 | else |
| 833 | ibx_irq_handler(dev, pch_iir); |
| 834 | |
| 835 | /* should clear PCH hotplug event before clearing the CPU irq */ |
| 836 | I915_WRITE(SDEIIR, pch_iir); |
| 837 | } |
| 838 | |
| 839 | if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) |
| 840 | ironlake_handle_rps_change(dev); |
| 841 | |
| 842 | if (IS_GEN6(dev) && pm_iir & GEN6_PM_DEFERRED_EVENTS) |
| 843 | gen6_queue_rps_work(dev_priv, pm_iir); |
| 844 | |
| 845 | I915_WRITE(GTIIR, gt_iir); |
| 846 | I915_WRITE(DEIIR, de_iir); |
| 847 | I915_WRITE(GEN6_PMIIR, pm_iir); |
| 848 | |
| 849 | done: |
| 850 | I915_WRITE(DEIER, de_ier); |
| 851 | POSTING_READ(DEIER); |
| 852 | |
| 853 | return ret; |
| 854 | } |
| 855 | |
| 856 | /** |
| 857 | * i915_error_work_func - do process context error handling work |
| 858 | * @work: work struct |
| 859 | * |
| 860 | * Fire an error uevent so userspace can see that a hang or error |
| 861 | * was detected. |
| 862 | */ |
| 863 | static void i915_error_work_func(struct work_struct *work) |
| 864 | { |
| 865 | drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, |
| 866 | gpu_error.work); |
| 867 | struct drm_device *dev = dev_priv->dev; |
| 868 | char *error_event[] = { "ERROR=1", NULL }; |
| 869 | char *reset_event[] = { "RESET=1", NULL }; |
| 870 | char *reset_done_event[] = { "ERROR=0", NULL }; |
| 871 | |
| 872 | kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); |
| 873 | |
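|  | /* If the hang actually wedged the GPU, try a full chip reset and let |
|  | * anyone waiting on the error completion know once it's done. |
|  | */ |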
| 874 | if (atomic_read(&dev_priv->gpu_error.wedged)) { |
| 875 | DRM_DEBUG_DRIVER("resetting chip\n"); |
| 876 | kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event); |
| 877 | if (!i915_reset(dev)) { |
| 878 | atomic_set(&dev_priv->gpu_error.wedged, 0); |
| 879 | kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event); |
| 880 | } |
| 881 | complete_all(&dev_priv->gpu_error.completion); |
| 882 | } |
| 883 | } |
| 884 | |
| 885 | /* NB: instdone is zeroed first, so slots not filled in for this gen read back as 0 */ |
| 886 | static void i915_get_extra_instdone(struct drm_device *dev, |
| 887 | uint32_t *instdone) |
| 888 | { |
| 889 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 890 | memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG); |
| 891 | |
| 892 | switch (INTEL_INFO(dev)->gen) { |
| 893 | case 2: |
| 894 | case 3: |
| 895 | instdone[0] = I915_READ(INSTDONE); |
| 896 | break; |
| 897 | case 4: |
| 898 | case 5: |
| 899 | case 6: |
| 900 | instdone[0] = I915_READ(INSTDONE_I965); |
| 901 | instdone[1] = I915_READ(INSTDONE1); |
| 902 | break; |
| 903 | default: |
| 904 | WARN_ONCE(1, "Unsupported platform\n"); |
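|  | /* fall through and report the gen7 registers as a best guess */ |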
| 905 | case 7: |
| 906 | instdone[0] = I915_READ(GEN7_INSTDONE_1); |
| 907 | instdone[1] = I915_READ(GEN7_SC_INSTDONE); |
| 908 | instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE); |
| 909 | instdone[3] = I915_READ(GEN7_ROW_INSTDONE); |
| 910 | break; |
| 911 | } |
| 912 | } |
| 913 | |
| 914 | #ifdef CONFIG_DEBUG_FS |
| 915 | static struct drm_i915_error_object * |
| 916 | i915_error_object_create(struct drm_i915_private *dev_priv, |
| 917 | struct drm_i915_gem_object *src) |
| 918 | { |
| 919 | struct drm_i915_error_object *dst; |
| 920 | int i, count; |
| 921 | u32 reloc_offset; |
| 922 | |
| 923 | if (src == NULL || src->pages == NULL) |
| 924 | return NULL; |
| 925 | |
| 926 | count = src->base.size / PAGE_SIZE; |
| 927 | |
| 928 | dst = kmalloc(sizeof(*dst) + count * sizeof(u32 *), GFP_ATOMIC); |
| 929 | if (dst == NULL) |
| 930 | return NULL; |
| 931 | |
| 932 | reloc_offset = src->gtt_offset; |
| 933 | for (i = 0; i < count; i++) { |
| 934 | unsigned long flags; |
| 935 | void *d; |
| 936 | |
| 937 | d = kmalloc(PAGE_SIZE, GFP_ATOMIC); |
| 938 | if (d == NULL) |
| 939 | goto unwind; |
| 940 | |
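|  | /* Copy the page via the GTT aperture, the stolen memory window or a |
|  | * plain kmap, depending on where the object is currently backed. |
|  | */ |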
| 941 | local_irq_save(flags); |
| 942 | if (reloc_offset < dev_priv->gtt.mappable_end && |
| 943 | src->has_global_gtt_mapping) { |
| 944 | void __iomem *s; |
| 945 | |
| 946 | /* Simply ignore tiling or any overlapping fence. |
| 947 | * It's part of the error state, and this hopefully |
| 948 | * captures what the GPU read. |
| 949 | */ |
| 950 | |
| 951 | s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, |
| 952 | reloc_offset); |
| 953 | memcpy_fromio(d, s, PAGE_SIZE); |
| 954 | io_mapping_unmap_atomic(s); |
| 955 | } else if (src->stolen) { |
| 956 | unsigned long offset; |
| 957 | |
| 958 | offset = dev_priv->mm.stolen_base; |
| 959 | offset += src->stolen->start; |
| 960 | offset += i << PAGE_SHIFT; |
| 961 | |
| 962 | memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE); |
| 963 | } else { |
| 964 | struct page *page; |
| 965 | void *s; |
| 966 | |
| 967 | page = i915_gem_object_get_page(src, i); |
| 968 | |
| 969 | drm_clflush_pages(&page, 1); |
| 970 | |
| 971 | s = kmap_atomic(page); |
| 972 | memcpy(d, s, PAGE_SIZE); |
| 973 | kunmap_atomic(s); |
| 974 | |
| 975 | drm_clflush_pages(&page, 1); |
| 976 | } |
| 977 | local_irq_restore(flags); |
| 978 | |
| 979 | dst->pages[i] = d; |
| 980 | |
| 981 | reloc_offset += PAGE_SIZE; |
| 982 | } |
| 983 | dst->page_count = count; |
| 984 | dst->gtt_offset = src->gtt_offset; |
| 985 | |
| 986 | return dst; |
| 987 | |
| 988 | unwind: |
| 989 | while (i--) |
| 990 | kfree(dst->pages[i]); |
| 991 | kfree(dst); |
| 992 | return NULL; |
| 993 | } |
| 994 | |
| 995 | static void |
| 996 | i915_error_object_free(struct drm_i915_error_object *obj) |
| 997 | { |
| 998 | int page; |
| 999 | |
| 1000 | if (obj == NULL) |
| 1001 | return; |
| 1002 | |
| 1003 | for (page = 0; page < obj->page_count; page++) |
| 1004 | kfree(obj->pages[page]); |
| 1005 | |
| 1006 | kfree(obj); |
| 1007 | } |
| 1008 | |
| 1009 | void |
| 1010 | i915_error_state_free(struct kref *error_ref) |
| 1011 | { |
| 1012 | struct drm_i915_error_state *error = container_of(error_ref, |
| 1013 | typeof(*error), ref); |
| 1014 | int i; |
| 1015 | |
| 1016 | for (i = 0; i < ARRAY_SIZE(error->ring); i++) { |
| 1017 | i915_error_object_free(error->ring[i].batchbuffer); |
| 1018 | i915_error_object_free(error->ring[i].ringbuffer); |
| 1019 | kfree(error->ring[i].requests); |
| 1020 | } |
| 1021 | |
| 1022 | kfree(error->active_bo); |
| 1023 | kfree(error->overlay); |
| 1024 | kfree(error); |
| 1025 | } |
| 1026 | static void capture_bo(struct drm_i915_error_buffer *err, |
| 1027 | struct drm_i915_gem_object *obj) |
| 1028 | { |
| 1029 | err->size = obj->base.size; |
| 1030 | err->name = obj->base.name; |
| 1031 | err->rseqno = obj->last_read_seqno; |
| 1032 | err->wseqno = obj->last_write_seqno; |
| 1033 | err->gtt_offset = obj->gtt_offset; |
| 1034 | err->read_domains = obj->base.read_domains; |
| 1035 | err->write_domain = obj->base.write_domain; |
| 1036 | err->fence_reg = obj->fence_reg; |
| 1037 | err->pinned = 0; |
| 1038 | if (obj->pin_count > 0) |
| 1039 | err->pinned = 1; |
| 1040 | if (obj->user_pin_count > 0) |
| 1041 | err->pinned = -1; |
| 1042 | err->tiling = obj->tiling_mode; |
| 1043 | err->dirty = obj->dirty; |
| 1044 | err->purgeable = obj->madv != I915_MADV_WILLNEED; |
| 1045 | err->ring = obj->ring ? obj->ring->id : -1; |
| 1046 | err->cache_level = obj->cache_level; |
| 1047 | } |
| 1048 | |
| 1049 | static u32 capture_active_bo(struct drm_i915_error_buffer *err, |
| 1050 | int count, struct list_head *head) |
| 1051 | { |
| 1052 | struct drm_i915_gem_object *obj; |
| 1053 | int i = 0; |
| 1054 | |
| 1055 | list_for_each_entry(obj, head, mm_list) { |
| 1056 | capture_bo(err++, obj); |
| 1057 | if (++i == count) |
| 1058 | break; |
| 1059 | } |
| 1060 | |
| 1061 | return i; |
| 1062 | } |
| 1063 | |
| 1064 | static u32 capture_pinned_bo(struct drm_i915_error_buffer *err, |
| 1065 | int count, struct list_head *head) |
| 1066 | { |
| 1067 | struct drm_i915_gem_object *obj; |
| 1068 | int i = 0; |
| 1069 | |
| 1070 | list_for_each_entry(obj, head, gtt_list) { |
| 1071 | if (obj->pin_count == 0) |
| 1072 | continue; |
| 1073 | |
| 1074 | capture_bo(err++, obj); |
| 1075 | if (++i == count) |
| 1076 | break; |
| 1077 | } |
| 1078 | |
| 1079 | return i; |
| 1080 | } |
| 1081 | |
| 1082 | static void i915_gem_record_fences(struct drm_device *dev, |
| 1083 | struct drm_i915_error_state *error) |
| 1084 | { |
| 1085 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1086 | int i; |
| 1087 | |
| 1088 | /* Fences */ |
| 1089 | switch (INTEL_INFO(dev)->gen) { |
| 1090 | case 7: |
| 1091 | case 6: |
| 1092 | for (i = 0; i < 16; i++) |
| 1093 | error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); |
| 1094 | break; |
| 1095 | case 5: |
| 1096 | case 4: |
| 1097 | for (i = 0; i < 16; i++) |
| 1098 | error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); |
| 1099 | break; |
| 1100 | case 3: |
| 1101 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) |
| 1102 | for (i = 0; i < 8; i++) |
| 1103 | error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); |
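|  | /* fall through: fences 0-7 live in the gen2 register block */ |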
| 1104 | case 2: |
| 1105 | for (i = 0; i < 8; i++) |
| 1106 | error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); |
| 1107 | break; |
| 1108 | |
| 1109 | default: |
| 1110 | BUG(); |
| 1111 | } |
| 1112 | } |
| 1113 | |
| 1114 | static struct drm_i915_error_object * |
| 1115 | i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, |
| 1116 | struct intel_ring_buffer *ring) |
| 1117 | { |
| 1118 | struct drm_i915_gem_object *obj; |
| 1119 | u32 seqno; |
| 1120 | |
| 1121 | if (!ring->get_seqno) |
| 1122 | return NULL; |
| 1123 | |
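|  | /* ring->private holds the scratch object used by the CS TLB workaround; |
|  | * if ACTHD points inside it, capture that object instead of searching |
|  | * the active list. |
|  | */ |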
| 1124 | if (HAS_BROKEN_CS_TLB(dev_priv->dev)) { |
| 1125 | u32 acthd = I915_READ(ACTHD); |
| 1126 | |
| 1127 | if (WARN_ON(ring->id != RCS)) |
| 1128 | return NULL; |
| 1129 | |
| 1130 | obj = ring->private; |
| 1131 | if (acthd >= obj->gtt_offset && |
| 1132 | acthd < obj->gtt_offset + obj->base.size) |
| 1133 | return i915_error_object_create(dev_priv, obj); |
| 1134 | } |
| 1135 | |
| 1136 | seqno = ring->get_seqno(ring, false); |
| 1137 | list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { |
| 1138 | if (obj->ring != ring) |
| 1139 | continue; |
| 1140 | |
| 1141 | if (i915_seqno_passed(seqno, obj->last_read_seqno)) |
| 1142 | continue; |
| 1143 | |
| 1144 | if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0) |
| 1145 | continue; |
| 1146 | |
| 1147 | /* We need to copy these to an anonymous buffer as the simplest |
| 1148 | * method to avoid being overwritten by userspace. |
| 1149 | */ |
| 1150 | return i915_error_object_create(dev_priv, obj); |
| 1151 | } |
| 1152 | |
| 1153 | return NULL; |
| 1154 | } |
| 1155 | |
| 1156 | static void i915_record_ring_state(struct drm_device *dev, |
| 1157 | struct drm_i915_error_state *error, |
| 1158 | struct intel_ring_buffer *ring) |
| 1159 | { |
| 1160 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1161 | |
| 1162 | if (INTEL_INFO(dev)->gen >= 6) { |
| 1163 | error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50); |
| 1164 | error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring)); |
| 1165 | error->semaphore_mboxes[ring->id][0] |
| 1166 | = I915_READ(RING_SYNC_0(ring->mmio_base)); |
| 1167 | error->semaphore_mboxes[ring->id][1] |
| 1168 | = I915_READ(RING_SYNC_1(ring->mmio_base)); |
| 1169 | error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0]; |
| 1170 | error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1]; |
| 1171 | } |
| 1172 | |
| 1173 | if (INTEL_INFO(dev)->gen >= 4) { |
| 1174 | error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base)); |
| 1175 | error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base)); |
| 1176 | error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base)); |
| 1177 | error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base)); |
| 1178 | error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base)); |
| 1179 | if (ring->id == RCS) |
| 1180 | error->bbaddr = I915_READ64(BB_ADDR); |
| 1181 | } else { |
| 1182 | error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX); |
| 1183 | error->ipeir[ring->id] = I915_READ(IPEIR); |
| 1184 | error->ipehr[ring->id] = I915_READ(IPEHR); |
| 1185 | error->instdone[ring->id] = I915_READ(INSTDONE); |
| 1186 | } |
| 1187 | |
| 1188 | error->waiting[ring->id] = waitqueue_active(&ring->irq_queue); |
| 1189 | error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base)); |
| 1190 | error->seqno[ring->id] = ring->get_seqno(ring, false); |
| 1191 | error->acthd[ring->id] = intel_ring_get_active_head(ring); |
| 1192 | error->head[ring->id] = I915_READ_HEAD(ring); |
| 1193 | error->tail[ring->id] = I915_READ_TAIL(ring); |
| 1194 | |
| 1195 | error->cpu_ring_head[ring->id] = ring->head; |
| 1196 | error->cpu_ring_tail[ring->id] = ring->tail; |
| 1197 | } |
| 1198 | |
| 1199 | static void i915_gem_record_rings(struct drm_device *dev, |
| 1200 | struct drm_i915_error_state *error) |
| 1201 | { |
| 1202 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1203 | struct intel_ring_buffer *ring; |
| 1204 | struct drm_i915_gem_request *request; |
| 1205 | int i, count; |
| 1206 | |
| 1207 | for_each_ring(ring, dev_priv, i) { |
| 1208 | i915_record_ring_state(dev, error, ring); |
| 1209 | |
| 1210 | error->ring[i].batchbuffer = |
| 1211 | i915_error_first_batchbuffer(dev_priv, ring); |
| 1212 | |
| 1213 | error->ring[i].ringbuffer = |
| 1214 | i915_error_object_create(dev_priv, ring->obj); |
| 1215 | |
| 1216 | count = 0; |
| 1217 | list_for_each_entry(request, &ring->request_list, list) |
| 1218 | count++; |
| 1219 | |
| 1220 | error->ring[i].num_requests = count; |
| 1221 | error->ring[i].requests = |
| 1222 | kmalloc(count*sizeof(struct drm_i915_error_request), |
| 1223 | GFP_ATOMIC); |
| 1224 | if (error->ring[i].requests == NULL) { |
| 1225 | error->ring[i].num_requests = 0; |
| 1226 | continue; |
| 1227 | } |
| 1228 | |
| 1229 | count = 0; |
| 1230 | list_for_each_entry(request, &ring->request_list, list) { |
| 1231 | struct drm_i915_error_request *erq; |
| 1232 | |
| 1233 | erq = &error->ring[i].requests[count++]; |
| 1234 | erq->seqno = request->seqno; |
| 1235 | erq->jiffies = request->emitted_jiffies; |
| 1236 | erq->tail = request->tail; |
| 1237 | } |
| 1238 | } |
| 1239 | } |
| 1240 | |
| 1241 | /** |
| 1242 | * i915_capture_error_state - capture an error record for later analysis |
| 1243 | * @dev: drm device |
| 1244 | * |
| 1245 | * Should be called when an error is detected (either a hang or an error |
| 1246 | * interrupt) to capture error state from the time of the error. Fills |
| 1247 | * out a structure which becomes available in debugfs for user level tools |
| 1248 | * to pick up. |
| 1249 | */ |
| 1250 | static void i915_capture_error_state(struct drm_device *dev) |
| 1251 | { |
| 1252 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1253 | struct drm_i915_gem_object *obj; |
| 1254 | struct drm_i915_error_state *error; |
| 1255 | unsigned long flags; |
| 1256 | int i, pipe; |
| 1257 | |
| 1258 | spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); |
| 1259 | error = dev_priv->gpu_error.first_error; |
| 1260 | spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); |
| 1261 | if (error) |
| 1262 | return; |
| 1263 | |
| 1264 | /* Account for pipe specific data like PIPE*STAT */ |
| 1265 | error = kzalloc(sizeof(*error), GFP_ATOMIC); |
| 1266 | if (!error) { |
| 1267 | DRM_DEBUG_DRIVER("out of memory, not capturing error state\n"); |
| 1268 | return; |
| 1269 | } |
| 1270 | |
| 1271 | DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n", |
| 1272 | dev->primary->index); |
| 1273 | |
| 1274 | kref_init(&error->ref); |
| 1275 | error->eir = I915_READ(EIR); |
| 1276 | error->pgtbl_er = I915_READ(PGTBL_ER); |
| 1277 | error->ccid = I915_READ(CCID); |
| 1278 | |
| 1279 | if (HAS_PCH_SPLIT(dev)) |
| 1280 | error->ier = I915_READ(DEIER) | I915_READ(GTIER); |
| 1281 | else if (IS_VALLEYVIEW(dev)) |
| 1282 | error->ier = I915_READ(GTIER) | I915_READ(VLV_IER); |
| 1283 | else if (IS_GEN2(dev)) |
| 1284 | error->ier = I915_READ16(IER); |
| 1285 | else |
| 1286 | error->ier = I915_READ(IER); |
| 1287 | |
| 1288 | for_each_pipe(pipe) |
| 1289 | error->pipestat[pipe] = I915_READ(PIPESTAT(pipe)); |
| 1290 | |
| 1291 | if (INTEL_INFO(dev)->gen >= 6) { |
| 1292 | error->error = I915_READ(ERROR_GEN6); |
| 1293 | error->done_reg = I915_READ(DONE_REG); |
| 1294 | } |
| 1295 | |
| 1296 | if (INTEL_INFO(dev)->gen == 7) |
| 1297 | error->err_int = I915_READ(GEN7_ERR_INT); |
| 1298 | |
| 1299 | i915_get_extra_instdone(dev, error->extra_instdone); |
| 1300 | |
| 1301 | i915_gem_record_fences(dev, error); |
| 1302 | i915_gem_record_rings(dev, error); |
| 1303 | |
| 1304 | /* Record buffers on the active and pinned lists. */ |
| 1305 | error->active_bo = NULL; |
| 1306 | error->pinned_bo = NULL; |
| 1307 | |
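|  | /* Count the active and pinned objects first so that a single array can |
|  | * be allocated to hold both sets of records. |
|  | */ |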
| 1308 | i = 0; |
| 1309 | list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) |
| 1310 | i++; |
| 1311 | error->active_bo_count = i; |
| 1312 | list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) |
| 1313 | if (obj->pin_count) |
| 1314 | i++; |
| 1315 | error->pinned_bo_count = i - error->active_bo_count; |
| 1316 | |
| 1317 | error->active_bo = NULL; |
| 1318 | error->pinned_bo = NULL; |
| 1319 | if (i) { |
| 1320 | error->active_bo = kmalloc(sizeof(*error->active_bo)*i, |
| 1321 | GFP_ATOMIC); |
| 1322 | if (error->active_bo) |
| 1323 | error->pinned_bo = |
| 1324 | error->active_bo + error->active_bo_count; |
| 1325 | } |
| 1326 | |
| 1327 | if (error->active_bo) |
| 1328 | error->active_bo_count = |
| 1329 | capture_active_bo(error->active_bo, |
| 1330 | error->active_bo_count, |
| 1331 | &dev_priv->mm.active_list); |
| 1332 | |
| 1333 | if (error->pinned_bo) |
| 1334 | error->pinned_bo_count = |
| 1335 | capture_pinned_bo(error->pinned_bo, |
| 1336 | error->pinned_bo_count, |
| 1337 | &dev_priv->mm.bound_list); |
| 1338 | |
| 1339 | do_gettimeofday(&error->time); |
| 1340 | |
| 1341 | error->overlay = intel_overlay_capture_error_state(dev); |
| 1342 | error->display = intel_display_capture_error_state(dev); |
| 1343 | |
| 1344 | spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); |
| 1345 | if (dev_priv->gpu_error.first_error == NULL) { |
| 1346 | dev_priv->gpu_error.first_error = error; |
| 1347 | error = NULL; |
| 1348 | } |
| 1349 | spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); |
| 1350 | |
| 1351 | if (error) |
| 1352 | i915_error_state_free(&error->ref); |
| 1353 | } |
| 1354 | |
| 1355 | void i915_destroy_error_state(struct drm_device *dev) |
| 1356 | { |
| 1357 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1358 | struct drm_i915_error_state *error; |
| 1359 | unsigned long flags; |
| 1360 | |
| 1361 | spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); |
| 1362 | error = dev_priv->gpu_error.first_error; |
| 1363 | dev_priv->gpu_error.first_error = NULL; |
| 1364 | spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); |
| 1365 | |
| 1366 | if (error) |
| 1367 | kref_put(&error->ref, i915_error_state_free); |
| 1368 | } |
| 1369 | #else |
| 1370 | #define i915_capture_error_state(x) |
| 1371 | #endif |
| 1372 | |
| 1373 | static void i915_report_and_clear_eir(struct drm_device *dev) |
| 1374 | { |
| 1375 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1376 | uint32_t instdone[I915_NUM_INSTDONE_REG]; |
| 1377 | u32 eir = I915_READ(EIR); |
| 1378 | int pipe, i; |
| 1379 | |
| 1380 | if (!eir) |
| 1381 | return; |
| 1382 | |
| 1383 | pr_err("render error detected, EIR: 0x%08x\n", eir); |
| 1384 | |
| 1385 | i915_get_extra_instdone(dev, instdone); |
| 1386 | |
| 1387 | if (IS_G4X(dev)) { |
| 1388 | if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { |
| 1389 | u32 ipeir = I915_READ(IPEIR_I965); |
| 1390 | |
| 1391 | pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); |
| 1392 | pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); |
| 1393 | for (i = 0; i < ARRAY_SIZE(instdone); i++) |
| 1394 | pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); |
| 1395 | pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); |
| 1396 | pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); |
| 1397 | I915_WRITE(IPEIR_I965, ipeir); |
| 1398 | POSTING_READ(IPEIR_I965); |
| 1399 | } |
| 1400 | if (eir & GM45_ERROR_PAGE_TABLE) { |
| 1401 | u32 pgtbl_err = I915_READ(PGTBL_ER); |
| 1402 | pr_err("page table error\n"); |
| 1403 | pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); |
| 1404 | I915_WRITE(PGTBL_ER, pgtbl_err); |
| 1405 | POSTING_READ(PGTBL_ER); |
| 1406 | } |
| 1407 | } |
| 1408 | |
| 1409 | if (!IS_GEN2(dev)) { |
| 1410 | if (eir & I915_ERROR_PAGE_TABLE) { |
| 1411 | u32 pgtbl_err = I915_READ(PGTBL_ER); |
| 1412 | pr_err("page table error\n"); |
| 1413 | pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); |
| 1414 | I915_WRITE(PGTBL_ER, pgtbl_err); |
| 1415 | POSTING_READ(PGTBL_ER); |
| 1416 | } |
| 1417 | } |
| 1418 | |
| 1419 | if (eir & I915_ERROR_MEMORY_REFRESH) { |
| 1420 | pr_err("memory refresh error:\n"); |
| 1421 | for_each_pipe(pipe) |
| 1422 | pr_err("pipe %c stat: 0x%08x\n", |
| 1423 | pipe_name(pipe), I915_READ(PIPESTAT(pipe))); |
| 1424 | /* pipestat has already been acked */ |
| 1425 | } |
| 1426 | if (eir & I915_ERROR_INSTRUCTION) { |
| 1427 | pr_err("instruction error\n"); |
| 1428 | pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); |
| 1429 | for (i = 0; i < ARRAY_SIZE(instdone); i++) |
| 1430 | pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); |
| 1431 | if (INTEL_INFO(dev)->gen < 4) { |
| 1432 | u32 ipeir = I915_READ(IPEIR); |
| 1433 | |
| 1434 | pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); |
| 1435 | pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); |
| 1436 | pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); |
| 1437 | I915_WRITE(IPEIR, ipeir); |
| 1438 | POSTING_READ(IPEIR); |
| 1439 | } else { |
| 1440 | u32 ipeir = I915_READ(IPEIR_I965); |
| 1441 | |
| 1442 | pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); |
| 1443 | pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); |
| 1444 | pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); |
| 1445 | pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); |
| 1446 | I915_WRITE(IPEIR_I965, ipeir); |
| 1447 | POSTING_READ(IPEIR_I965); |
| 1448 | } |
| 1449 | } |
| 1450 | |
| 1451 | I915_WRITE(EIR, eir); |
| 1452 | POSTING_READ(EIR); |
| 1453 | eir = I915_READ(EIR); |
| 1454 | if (eir) { |
| 1455 | /* |
| 1456 | * Some errors might have become stuck; |
| 1457 | * mask them. |
| 1458 | */ |
| 1459 | DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); |
| 1460 | I915_WRITE(EMR, I915_READ(EMR) | eir); |
| 1461 | I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); |
| 1462 | } |
| 1463 | } |
| 1464 | |
| 1465 | /** |
| 1466 | * i915_handle_error - handle an error interrupt |
| 1467 | * @dev: drm device |
| 1468 | * |
| 1469 | * Do some basic checking of register state at error interrupt time and |
| 1470 | * dump it to the syslog. Also call i915_capture_error_state() to make |
| 1471 | * sure we get a record and make it available in debugfs. Fire a uevent |
| 1472 | * so userspace knows something bad happened (should trigger collection |
| 1473 | * of a ring dump etc.). |
| 1474 | */ |
| 1475 | void i915_handle_error(struct drm_device *dev, bool wedged) |
| 1476 | { |
| 1477 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1478 | struct intel_ring_buffer *ring; |
| 1479 | int i; |
| 1480 | |
| 1481 | i915_capture_error_state(dev); |
| 1482 | i915_report_and_clear_eir(dev); |
| 1483 | |
| 1484 | if (wedged) { |
| 1485 | INIT_COMPLETION(dev_priv->gpu_error.completion); |
| 1486 | atomic_set(&dev_priv->gpu_error.wedged, 1); |
| 1487 | |
| 1488 | /* |
| 1489 | * Wake up waiting processes so they don't hang |
| 1490 | */ |
| 1491 | for_each_ring(ring, dev_priv, i) |
| 1492 | wake_up_all(&ring->irq_queue); |
| 1493 | } |
| 1494 | |
| 1495 | queue_work(dev_priv->wq, &dev_priv->gpu_error.work); |
| 1496 | } |
| 1497 | |
| 1498 | static void i915_pageflip_stall_check(struct drm_device *dev, int pipe) |
| 1499 | { |
| 1500 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 1501 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; |
| 1502 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
| 1503 | struct drm_i915_gem_object *obj; |
| 1504 | struct intel_unpin_work *work; |
| 1505 | unsigned long flags; |
| 1506 | bool stall_detected; |
| 1507 | |
| 1508 | /* Ignore early vblank irqs */ |
| 1509 | if (intel_crtc == NULL) |
| 1510 | return; |
| 1511 | |
| 1512 | spin_lock_irqsave(&dev->event_lock, flags); |
| 1513 | work = intel_crtc->unpin_work; |
| 1514 | |
| 1515 | if (work == NULL || |
| 1516 | atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE || |
| 1517 | !work->enable_stall_check) { |
| 1518 | /* Either the pending flip IRQ arrived, or we're too early. Don't check */ |
| 1519 | spin_unlock_irqrestore(&dev->event_lock, flags); |
| 1520 | return; |
| 1521 | } |
| 1522 | |
| 1523 | /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ |
| 1524 | obj = work->pending_flip_obj; |
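|  | /* On gen4+ the scanout base is read back from DSPSURF; older parts use |
|  | * the linear DSPADDR offset, so recompute it from the fb pitch, bpp and |
|  | * the current pan position. |
|  | */ |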
| 1525 | if (INTEL_INFO(dev)->gen >= 4) { |
| 1526 | int dspsurf = DSPSURF(intel_crtc->plane); |
| 1527 | stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) == |
| 1528 | obj->gtt_offset; |
| 1529 | } else { |
| 1530 | int dspaddr = DSPADDR(intel_crtc->plane); |
| 1531 | stall_detected = I915_READ(dspaddr) == (obj->gtt_offset + |
| 1532 | crtc->y * crtc->fb->pitches[0] + |
| 1533 | crtc->x * crtc->fb->bits_per_pixel/8); |
| 1534 | } |
| 1535 | |
| 1536 | spin_unlock_irqrestore(&dev->event_lock, flags); |
| 1537 | |
| 1538 | if (stall_detected) { |
| 1539 | DRM_DEBUG_DRIVER("Pageflip stall detected\n"); |
| 1540 | intel_prepare_page_flip(dev, intel_crtc->plane); |
| 1541 | } |
| 1542 | } |
| 1543 | |
| 1544 | /* Called from drm generic code, passed 'crtc' which |
| 1545 | * we use as a pipe index |
| 1546 | */ |
| 1547 | static int i915_enable_vblank(struct drm_device *dev, int pipe) |
| 1548 | { |
| 1549 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 1550 | unsigned long irqflags; |
| 1551 | |
| 1552 | if (!i915_pipe_enabled(dev, pipe)) |
| 1553 | return -EINVAL; |
| 1554 | |
| 1555 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
| 1556 | if (INTEL_INFO(dev)->gen >= 4) |
| 1557 | i915_enable_pipestat(dev_priv, pipe, |
| 1558 | PIPE_START_VBLANK_INTERRUPT_ENABLE); |
| 1559 | else |
| 1560 | i915_enable_pipestat(dev_priv, pipe, |
| 1561 | PIPE_VBLANK_INTERRUPT_ENABLE); |
| 1562 | |
| 1563 | /* maintain vblank delivery even in deep C-states */ |
| 1564 | if (dev_priv->info->gen == 3) |
| 1565 | I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS)); |
| 1566 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
| 1567 | |
| 1568 | return 0; |
| 1569 | } |
| 1570 | |
| 1571 | static int ironlake_enable_vblank(struct drm_device *dev, int pipe) |
| 1572 | { |
| 1573 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 1574 | unsigned long irqflags; |
| 1575 | |
| 1576 | if (!i915_pipe_enabled(dev, pipe)) |
| 1577 | return -EINVAL; |
| 1578 | |
| 1579 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
| 1580 | ironlake_enable_display_irq(dev_priv, (pipe == 0) ? |
| 1581 | DE_PIPEA_VBLANK : DE_PIPEB_VBLANK); |
| 1582 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
| 1583 | |
| 1584 | return 0; |
| 1585 | } |
| 1586 | |
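/* IVB/HSW use the same DEIMR scheme, but the per-pipe vblank bits are
 * spaced five bits apart, starting from DE_PIPEA_VBLANK_IVB.
 */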
| 1587 | static int ivybridge_enable_vblank(struct drm_device *dev, int pipe) |
| 1588 | { |
| 1589 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 1590 | unsigned long irqflags; |
| 1591 | |
| 1592 | if (!i915_pipe_enabled(dev, pipe)) |
| 1593 | return -EINVAL; |
| 1594 | |
| 1595 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
| 1596 | ironlake_enable_display_irq(dev_priv, |
| 1597 | DE_PIPEA_VBLANK_IVB << (5 * pipe)); |
| 1598 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
| 1599 | |
| 1600 | return 0; |
| 1601 | } |
| 1602 | |
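/* On VLV vblank events are reported through the pipestat registers, and the
 * matching bit in VLV_IMR also has to be unmasked for delivery.
 */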
| 1603 | static int valleyview_enable_vblank(struct drm_device *dev, int pipe) |
| 1604 | { |
| 1605 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 1606 | unsigned long irqflags; |
| 1607 | u32 imr; |
| 1608 | |
| 1609 | if (!i915_pipe_enabled(dev, pipe)) |
| 1610 | return -EINVAL; |
| 1611 | |
| 1612 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
| 1613 | imr = I915_READ(VLV_IMR); |
| 1614 | if (pipe == 0) |
| 1615 | imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; |
| 1616 | else |
| 1617 | imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; |
| 1618 | I915_WRITE(VLV_IMR, imr); |
| 1619 | i915_enable_pipestat(dev_priv, pipe, |
| 1620 | PIPE_START_VBLANK_INTERRUPT_ENABLE); |
| 1621 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
| 1622 | |
| 1623 | return 0; |
| 1624 | } |
| 1625 | |
| 1626 | /* Called from drm generic code, passed 'crtc' which |
| 1627 | * we use as a pipe index |
| 1628 | */ |
| 1629 | static void i915_disable_vblank(struct drm_device *dev, int pipe) |
| 1630 | { |
| 1631 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 1632 | unsigned long irqflags; |
| 1633 | |
| 1634 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
| 1635 | if (dev_priv->info->gen == 3) |
| 1636 | I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS)); |
| 1637 | |
| 1638 | i915_disable_pipestat(dev_priv, pipe, |
| 1639 | PIPE_VBLANK_INTERRUPT_ENABLE | |
| 1640 | PIPE_START_VBLANK_INTERRUPT_ENABLE); |
| 1641 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
| 1642 | } |
| 1643 | |
| 1644 | static void ironlake_disable_vblank(struct drm_device *dev, int pipe) |
| 1645 | { |
| 1646 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 1647 | unsigned long irqflags; |
| 1648 | |
| 1649 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
| 1650 | ironlake_disable_display_irq(dev_priv, (pipe == 0) ? |
| 1651 | DE_PIPEA_VBLANK : DE_PIPEB_VBLANK); |
| 1652 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
| 1653 | } |
| 1654 | |
| 1655 | static void ivybridge_disable_vblank(struct drm_device *dev, int pipe) |
| 1656 | { |
| 1657 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 1658 | unsigned long irqflags; |
| 1659 | |
| 1660 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
| 1661 | ironlake_disable_display_irq(dev_priv, |
| 1662 | DE_PIPEA_VBLANK_IVB << (pipe * 5)); |
| 1663 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
| 1664 | } |
| 1665 | |
| 1666 | static void valleyview_disable_vblank(struct drm_device *dev, int pipe) |
| 1667 | { |
| 1668 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 1669 | unsigned long irqflags; |
| 1670 | u32 imr; |
| 1671 | |
| 1672 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
| 1673 | i915_disable_pipestat(dev_priv, pipe, |
| 1674 | PIPE_START_VBLANK_INTERRUPT_ENABLE); |
| 1675 | imr = I915_READ(VLV_IMR); |
| 1676 | if (pipe == 0) |
| 1677 | imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; |
| 1678 | else |
| 1679 | imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; |
| 1680 | I915_WRITE(VLV_IMR, imr); |
| 1681 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
| 1682 | } |
| 1683 | |
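/* Return the seqno of the most recently submitted request on the ring. */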
| 1684 | static u32 |
| 1685 | ring_last_seqno(struct intel_ring_buffer *ring) |
| 1686 | { |
| 1687 | return list_entry(ring->request_list.prev, |
| 1688 | struct drm_i915_gem_request, list)->seqno; |
| 1689 | } |
| 1690 | |
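/* A ring is idle when it has no outstanding requests, or the hardware has
 * already passed the seqno of the last one. If waiters are still queued on
 * an idle ring we most likely missed an interrupt, so wake them up and
 * report the anomaly through *err.
 */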
| 1691 | static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err) |
| 1692 | { |
| 1693 | if (list_empty(&ring->request_list) || |
| 1694 | i915_seqno_passed(ring->get_seqno(ring, false), |
| 1695 | ring_last_seqno(ring))) { |
| 1696 | /* Issue a wake-up to catch stuck h/w. */ |
| 1697 | if (waitqueue_active(&ring->irq_queue)) { |
| 1698 | DRM_ERROR("Hangcheck timer elapsed... %s idle\n", |
| 1699 | ring->name); |
| 1700 | wake_up_all(&ring->irq_queue); |
| 1701 | *err = true; |
| 1702 | } |
| 1703 | return true; |
| 1704 | } |
| 1705 | return false; |
| 1706 | } |
| 1707 | |
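/* If the ring is stuck on a WAIT_FOR_EVENT (RING_WAIT set in the ring
 * control register), rewriting the register breaks it out of the wait.
 * Returns true if such a kick was attempted.
 */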
| 1708 | static bool kick_ring(struct intel_ring_buffer *ring) |
| 1709 | { |
| 1710 | struct drm_device *dev = ring->dev; |
| 1711 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 1712 | u32 tmp = I915_READ_CTL(ring); |
| 1713 | if (tmp & RING_WAIT) { |
| 1714 | DRM_ERROR("Kicking stuck wait on %s\n", |
| 1715 | ring->name); |
| 1716 | I915_WRITE_CTL(ring, tmp); |
| 1717 | return true; |
| 1718 | } |
| 1719 | return false; |
| 1720 | } |
| 1721 | |
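/* Called when the hangcheck fires without the GPU having made progress.
 * Once the hang has persisted across several checks, raise a GPU error and
 * try to unstick rings that are merely blocked on WAIT_FOR_EVENT; the hang
 * is reported as real only if no stuck ring could be kicked.
 */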
| 1722 | static bool i915_hangcheck_hung(struct drm_device *dev) |
| 1723 | { |
| 1724 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 1725 | |
| 1726 | if (dev_priv->gpu_error.hangcheck_count++ > 1) { |
| 1727 | bool hung = true; |
| 1728 | |
| 1729 | DRM_ERROR("Hangcheck timer elapsed... GPU hung\n"); |
| 1730 | i915_handle_error(dev, true); |
| 1731 | |
| 1732 | if (!IS_GEN2(dev)) { |
| 1733 | struct intel_ring_buffer *ring; |
| 1734 | int i; |
| 1735 | |
| 1736 | /* Is the chip hanging on a WAIT_FOR_EVENT? |
| 1737 | * If so we can simply poke the RB_WAIT bit |
| 1738 | * and break the hang. This should work on |
| 1739 | * all but the second generation chipsets. |
| 1740 | */ |
| 1741 | for_each_ring(ring, dev_priv, i) |
| 1742 | hung &= !kick_ring(ring); |
| 1743 | } |
| 1744 | |
| 1745 | return hung; |
| 1746 | } |
| 1747 | |
| 1748 | return false; |
| 1749 | } |
| 1750 | |
| 1751 | /** |
| 1752 | * This is called when the chip hasn't reported back with completed |
| 1753 | * batchbuffers in a long time. The first time this is called we simply record |
| 1754 | * ACTHD. If ACTHD hasn't changed by the time the hangcheck timer elapses |
| 1755 | * again, we assume the chip is wedged and try to fix it. |
| 1756 | */ |
| 1757 | void i915_hangcheck_elapsed(unsigned long data) |
| 1758 | { |
| 1759 | struct drm_device *dev = (struct drm_device *)data; |
| 1760 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 1761 | uint32_t acthd[I915_NUM_RINGS], instdone[I915_NUM_INSTDONE_REG]; |
| 1762 | struct intel_ring_buffer *ring; |
| 1763 | bool err = false, idle; |
| 1764 | int i; |
| 1765 | |
| 1766 | if (!i915_enable_hangcheck) |
| 1767 | return; |
| 1768 | |
| 1769 | memset(acthd, 0, sizeof(acthd)); |
| 1770 | idle = true; |
| 1771 | for_each_ring(ring, dev_priv, i) { |
| 1772 | idle &= i915_hangcheck_ring_idle(ring, &err); |
| 1773 | acthd[i] = intel_ring_get_active_head(ring); |
| 1774 | } |
| 1775 | |
| 1776 | /* If all work is done then ACTHD clearly hasn't advanced. */ |
| 1777 | if (idle) { |
| 1778 | if (err) { |
| 1779 | if (i915_hangcheck_hung(dev)) |
| 1780 | return; |
| 1781 | |
| 1782 | goto repeat; |
| 1783 | } |
| 1784 | |
| 1785 | dev_priv->gpu_error.hangcheck_count = 0; |
| 1786 | return; |
| 1787 | } |
| 1788 | |
| 1789 | i915_get_extra_instdone(dev, instdone); |
| 1790 | if (memcmp(dev_priv->gpu_error.last_acthd, acthd, |
| 1791 | sizeof(acthd)) == 0 && |
| 1792 | memcmp(dev_priv->gpu_error.prev_instdone, instdone, |
| 1793 | sizeof(instdone)) == 0) { |
| 1794 | if (i915_hangcheck_hung(dev)) |
| 1795 | return; |
| 1796 | } else { |
| 1797 | dev_priv->gpu_error.hangcheck_count = 0; |
| 1798 | |
| 1799 | memcpy(dev_priv->gpu_error.last_acthd, acthd, |
| 1800 | sizeof(acthd)); |
| 1801 | memcpy(dev_priv->gpu_error.prev_instdone, instdone, |
| 1802 | sizeof(instdone)); |
| 1803 | } |
| 1804 | |
| 1805 | repeat: |
	/* Reset timer in case the chip hangs without another request being added */
| 1807 | mod_timer(&dev_priv->gpu_error.hangcheck_timer, |
| 1808 | round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); |
| 1809 | } |
| 1810 | |
/* drm_dma.h hooks */
| 1813 | static void ironlake_irq_preinstall(struct drm_device *dev) |
| 1814 | { |
| 1815 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 1816 | |
| 1817 | atomic_set(&dev_priv->irq_received, 0); |
| 1818 | |
| 1819 | I915_WRITE(HWSTAM, 0xeffe); |
| 1820 | |
| 1821 | /* XXX hotplug from PCH */ |
| 1822 | |
| 1823 | I915_WRITE(DEIMR, 0xffffffff); |
| 1824 | I915_WRITE(DEIER, 0x0); |
| 1825 | POSTING_READ(DEIER); |
| 1826 | |
| 1827 | /* and GT */ |
| 1828 | I915_WRITE(GTIMR, 0xffffffff); |
| 1829 | I915_WRITE(GTIER, 0x0); |
| 1830 | POSTING_READ(GTIER); |
| 1831 | |
| 1832 | /* south display irq */ |
| 1833 | I915_WRITE(SDEIMR, 0xffffffff); |
| 1834 | I915_WRITE(SDEIER, 0x0); |
| 1835 | POSTING_READ(SDEIER); |
| 1836 | } |
| 1837 | |
| 1838 | static void valleyview_irq_preinstall(struct drm_device *dev) |
| 1839 | { |
| 1840 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 1841 | int pipe; |
| 1842 | |
| 1843 | atomic_set(&dev_priv->irq_received, 0); |
| 1844 | |
| 1845 | /* VLV magic */ |
| 1846 | I915_WRITE(VLV_IMR, 0); |
| 1847 | I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); |
| 1848 | I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); |
| 1849 | I915_WRITE(RING_IMR(BLT_RING_BASE), 0); |
| 1850 | |
| 1851 | /* and GT */ |
| 1852 | I915_WRITE(GTIIR, I915_READ(GTIIR)); |
| 1853 | I915_WRITE(GTIIR, I915_READ(GTIIR)); |
| 1854 | I915_WRITE(GTIMR, 0xffffffff); |
| 1855 | I915_WRITE(GTIER, 0x0); |
| 1856 | POSTING_READ(GTIER); |
| 1857 | |
| 1858 | I915_WRITE(DPINVGTT, 0xff); |
| 1859 | |
| 1860 | I915_WRITE(PORT_HOTPLUG_EN, 0); |
| 1861 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); |
| 1862 | for_each_pipe(pipe) |
| 1863 | I915_WRITE(PIPESTAT(pipe), 0xffff); |
| 1864 | I915_WRITE(VLV_IIR, 0xffffffff); |
| 1865 | I915_WRITE(VLV_IMR, 0xffffffff); |
| 1866 | I915_WRITE(VLV_IER, 0x0); |
| 1867 | POSTING_READ(VLV_IER); |
| 1868 | } |
| 1869 | |
| 1870 | /* |
| 1871 | * Enable digital hotplug on the PCH, and configure the DP short pulse |
| 1872 | * duration to 2ms (which is the minimum in the Display Port spec) |
| 1873 | * |
| 1874 | * This register is the same on all known PCH chips. |
| 1875 | */ |
| 1876 | |
| 1877 | static void ironlake_enable_pch_hotplug(struct drm_device *dev) |
| 1878 | { |
| 1879 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 1880 | u32 hotplug; |
| 1881 | |
| 1882 | hotplug = I915_READ(PCH_PORT_HOTPLUG); |
| 1883 | hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); |
| 1884 | hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; |
| 1885 | hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; |
| 1886 | hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; |
| 1887 | I915_WRITE(PCH_PORT_HOTPLUG, hotplug); |
| 1888 | } |
| 1889 | |
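/* Unmask and enable the display (DEIMR/DEIER), GT (GTIMR/GTIER) and PCH
 * south display (SDEIMR/SDEIER) interrupt sources used on ILK/SNB, then
 * set up PCH hotplug detection.
 */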
| 1890 | static int ironlake_irq_postinstall(struct drm_device *dev) |
| 1891 | { |
| 1892 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
	/* enable the kinds of interrupts that are always enabled */
| 1894 | u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | |
| 1895 | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | |
| 1896 | DE_AUX_CHANNEL_A; |
| 1897 | u32 render_irqs; |
| 1898 | u32 hotplug_mask; |
| 1899 | u32 pch_irq_mask; |
| 1900 | |
| 1901 | dev_priv->irq_mask = ~display_mask; |
| 1902 | |
	/* should always be able to generate irqs */
| 1904 | I915_WRITE(DEIIR, I915_READ(DEIIR)); |
| 1905 | I915_WRITE(DEIMR, dev_priv->irq_mask); |
| 1906 | I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK); |
| 1907 | POSTING_READ(DEIER); |
| 1908 | |
| 1909 | dev_priv->gt_irq_mask = ~0; |
| 1910 | |
| 1911 | I915_WRITE(GTIIR, I915_READ(GTIIR)); |
| 1912 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); |
| 1913 | |
| 1914 | if (IS_GEN6(dev)) |
| 1915 | render_irqs = |
| 1916 | GT_USER_INTERRUPT | |
| 1917 | GEN6_BSD_USER_INTERRUPT | |
| 1918 | GEN6_BLITTER_USER_INTERRUPT; |
| 1919 | else |
| 1920 | render_irqs = |
| 1921 | GT_USER_INTERRUPT | |
| 1922 | GT_PIPE_NOTIFY | |
| 1923 | GT_BSD_USER_INTERRUPT; |
| 1924 | I915_WRITE(GTIER, render_irqs); |
| 1925 | POSTING_READ(GTIER); |
| 1926 | |
| 1927 | if (HAS_PCH_CPT(dev)) { |
| 1928 | hotplug_mask = (SDE_CRT_HOTPLUG_CPT | |
| 1929 | SDE_PORTB_HOTPLUG_CPT | |
| 1930 | SDE_PORTC_HOTPLUG_CPT | |
| 1931 | SDE_PORTD_HOTPLUG_CPT | |
| 1932 | SDE_GMBUS_CPT | |
| 1933 | SDE_AUX_MASK_CPT); |
| 1934 | } else { |
| 1935 | hotplug_mask = (SDE_CRT_HOTPLUG | |
| 1936 | SDE_PORTB_HOTPLUG | |
| 1937 | SDE_PORTC_HOTPLUG | |
| 1938 | SDE_PORTD_HOTPLUG | |
| 1939 | SDE_GMBUS | |
| 1940 | SDE_AUX_MASK); |
| 1941 | } |
| 1942 | |
| 1943 | pch_irq_mask = ~hotplug_mask; |
| 1944 | |
| 1945 | I915_WRITE(SDEIIR, I915_READ(SDEIIR)); |
| 1946 | I915_WRITE(SDEIMR, pch_irq_mask); |
| 1947 | I915_WRITE(SDEIER, hotplug_mask); |
| 1948 | POSTING_READ(SDEIER); |
| 1949 | |
| 1950 | ironlake_enable_pch_hotplug(dev); |
| 1951 | |
| 1952 | if (IS_IRONLAKE_M(dev)) { |
| 1953 | /* Clear & enable PCU event interrupts */ |
| 1954 | I915_WRITE(DEIIR, DE_PCU_EVENT); |
| 1955 | I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT); |
| 1956 | ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); |
| 1957 | } |
| 1958 | |
| 1959 | return 0; |
| 1960 | } |
| 1961 | |
| 1962 | static int ivybridge_irq_postinstall(struct drm_device *dev) |
| 1963 | { |
| 1964 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
	/* enable the kinds of interrupts that are always enabled */
| 1966 | u32 display_mask = |
| 1967 | DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB | |
| 1968 | DE_PLANEC_FLIP_DONE_IVB | |
| 1969 | DE_PLANEB_FLIP_DONE_IVB | |
| 1970 | DE_PLANEA_FLIP_DONE_IVB | |
| 1971 | DE_AUX_CHANNEL_A_IVB; |
| 1972 | u32 render_irqs; |
| 1973 | u32 hotplug_mask; |
| 1974 | u32 pch_irq_mask; |
| 1975 | |
| 1976 | dev_priv->irq_mask = ~display_mask; |
| 1977 | |
	/* should always be able to generate irqs */
| 1979 | I915_WRITE(DEIIR, I915_READ(DEIIR)); |
| 1980 | I915_WRITE(DEIMR, dev_priv->irq_mask); |
| 1981 | I915_WRITE(DEIER, |
| 1982 | display_mask | |
| 1983 | DE_PIPEC_VBLANK_IVB | |
| 1984 | DE_PIPEB_VBLANK_IVB | |
| 1985 | DE_PIPEA_VBLANK_IVB); |
| 1986 | POSTING_READ(DEIER); |
| 1987 | |
| 1988 | dev_priv->gt_irq_mask = ~GT_GEN7_L3_PARITY_ERROR_INTERRUPT; |
| 1989 | |
| 1990 | I915_WRITE(GTIIR, I915_READ(GTIIR)); |
| 1991 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); |
| 1992 | |
| 1993 | render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT | |
| 1994 | GEN6_BLITTER_USER_INTERRUPT | GT_GEN7_L3_PARITY_ERROR_INTERRUPT; |
| 1995 | I915_WRITE(GTIER, render_irqs); |
| 1996 | POSTING_READ(GTIER); |
| 1997 | |
| 1998 | hotplug_mask = (SDE_CRT_HOTPLUG_CPT | |
| 1999 | SDE_PORTB_HOTPLUG_CPT | |
| 2000 | SDE_PORTC_HOTPLUG_CPT | |
| 2001 | SDE_PORTD_HOTPLUG_CPT | |
| 2002 | SDE_GMBUS_CPT | |
| 2003 | SDE_AUX_MASK_CPT); |
| 2004 | pch_irq_mask = ~hotplug_mask; |
| 2005 | |
| 2006 | I915_WRITE(SDEIIR, I915_READ(SDEIIR)); |
| 2007 | I915_WRITE(SDEIMR, pch_irq_mask); |
| 2008 | I915_WRITE(SDEIER, hotplug_mask); |
| 2009 | POSTING_READ(SDEIER); |
| 2010 | |
| 2011 | ironlake_enable_pch_hotplug(dev); |
| 2012 | |
| 2013 | return 0; |
| 2014 | } |
| 2015 | |
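/* VLV install: program VLV_IER/IMR and the pipestat enables, apply the
 * workaround for broken MSI delivery, and finally turn on the master
 * interrupt enable.
 */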
| 2016 | static int valleyview_irq_postinstall(struct drm_device *dev) |
| 2017 | { |
| 2018 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 2019 | u32 enable_mask; |
| 2020 | u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV; |
| 2021 | u32 render_irqs; |
| 2022 | u16 msid; |
| 2023 | |
| 2024 | enable_mask = I915_DISPLAY_PORT_INTERRUPT; |
| 2025 | enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | |
| 2026 | I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | |
| 2027 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | |
| 2028 | I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; |
| 2029 | |
| 2030 | /* |
| 2031 | *Leave vblank interrupts masked initially. enable/disable will |
| 2032 | * toggle them based on usage. |
| 2033 | */ |
| 2034 | dev_priv->irq_mask = (~enable_mask) | |
| 2035 | I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | |
| 2036 | I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; |
| 2037 | |
| 2038 | dev_priv->pipestat[0] = 0; |
| 2039 | dev_priv->pipestat[1] = 0; |
| 2040 | |
| 2041 | /* Hack for broken MSIs on VLV */ |
| 2042 | pci_write_config_dword(dev_priv->dev->pdev, 0x94, 0xfee00000); |
| 2043 | pci_read_config_word(dev->pdev, 0x98, &msid); |
| 2044 | msid &= 0xff; /* mask out delivery bits */ |
| 2045 | msid |= (1<<14); |
| 2046 | pci_write_config_word(dev_priv->dev->pdev, 0x98, msid); |
| 2047 | |
| 2048 | I915_WRITE(PORT_HOTPLUG_EN, 0); |
| 2049 | POSTING_READ(PORT_HOTPLUG_EN); |
| 2050 | |
| 2051 | I915_WRITE(VLV_IMR, dev_priv->irq_mask); |
| 2052 | I915_WRITE(VLV_IER, enable_mask); |
| 2053 | I915_WRITE(VLV_IIR, 0xffffffff); |
| 2054 | I915_WRITE(PIPESTAT(0), 0xffff); |
| 2055 | I915_WRITE(PIPESTAT(1), 0xffff); |
| 2056 | POSTING_READ(VLV_IER); |
| 2057 | |
| 2058 | i915_enable_pipestat(dev_priv, 0, pipestat_enable); |
| 2059 | i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); |
| 2060 | i915_enable_pipestat(dev_priv, 1, pipestat_enable); |
| 2061 | |
| 2062 | I915_WRITE(VLV_IIR, 0xffffffff); |
| 2063 | I915_WRITE(VLV_IIR, 0xffffffff); |
| 2064 | |
| 2065 | I915_WRITE(GTIIR, I915_READ(GTIIR)); |
| 2066 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); |
| 2067 | |
| 2068 | render_irqs = GT_USER_INTERRUPT | GEN6_BSD_USER_INTERRUPT | |
| 2069 | GEN6_BLITTER_USER_INTERRUPT; |
| 2070 | I915_WRITE(GTIER, render_irqs); |
| 2071 | POSTING_READ(GTIER); |
| 2072 | |
| 2073 | /* ack & enable invalid PTE error interrupts */ |
| 2074 | #if 0 /* FIXME: add support to irq handler for checking these bits */ |
| 2075 | I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); |
| 2076 | I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK); |
| 2077 | #endif |
| 2078 | |
| 2079 | I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); |
| 2080 | |
| 2081 | return 0; |
| 2082 | } |
| 2083 | |
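/* Enable hotplug detection for the digital and CRT ports the platform
 * reports as supporting hotplug (HDMI and DP share the same bits).
 */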
| 2084 | static void valleyview_hpd_irq_setup(struct drm_device *dev) |
| 2085 | { |
| 2086 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 2087 | u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN); |
| 2088 | |
| 2089 | /* Note HDMI and DP share bits */ |
| 2090 | if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) |
| 2091 | hotplug_en |= HDMIB_HOTPLUG_INT_EN; |
| 2092 | if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) |
| 2093 | hotplug_en |= HDMIC_HOTPLUG_INT_EN; |
| 2094 | if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) |
| 2095 | hotplug_en |= HDMID_HOTPLUG_INT_EN; |
| 2096 | if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915) |
| 2097 | hotplug_en |= SDVOC_HOTPLUG_INT_EN; |
| 2098 | if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915) |
| 2099 | hotplug_en |= SDVOB_HOTPLUG_INT_EN; |
| 2100 | if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { |
| 2101 | hotplug_en |= CRT_HOTPLUG_INT_EN; |
| 2102 | hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; |
| 2103 | } |
| 2104 | |
| 2105 | I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); |
| 2106 | } |
| 2107 | |
| 2108 | static void valleyview_irq_uninstall(struct drm_device *dev) |
| 2109 | { |
| 2110 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 2111 | int pipe; |
| 2112 | |
| 2113 | if (!dev_priv) |
| 2114 | return; |
| 2115 | |
| 2116 | for_each_pipe(pipe) |
| 2117 | I915_WRITE(PIPESTAT(pipe), 0xffff); |
| 2118 | |
| 2119 | I915_WRITE(HWSTAM, 0xffffffff); |
| 2120 | I915_WRITE(PORT_HOTPLUG_EN, 0); |
| 2121 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); |
| 2122 | for_each_pipe(pipe) |
| 2123 | I915_WRITE(PIPESTAT(pipe), 0xffff); |
| 2124 | I915_WRITE(VLV_IIR, 0xffffffff); |
| 2125 | I915_WRITE(VLV_IMR, 0xffffffff); |
| 2126 | I915_WRITE(VLV_IER, 0x0); |
| 2127 | POSTING_READ(VLV_IER); |
| 2128 | } |
| 2129 | |
| 2130 | static void ironlake_irq_uninstall(struct drm_device *dev) |
| 2131 | { |
| 2132 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 2133 | |
| 2134 | if (!dev_priv) |
| 2135 | return; |
| 2136 | |
| 2137 | I915_WRITE(HWSTAM, 0xffffffff); |
| 2138 | |
| 2139 | I915_WRITE(DEIMR, 0xffffffff); |
| 2140 | I915_WRITE(DEIER, 0x0); |
| 2141 | I915_WRITE(DEIIR, I915_READ(DEIIR)); |
| 2142 | |
| 2143 | I915_WRITE(GTIMR, 0xffffffff); |
| 2144 | I915_WRITE(GTIER, 0x0); |
| 2145 | I915_WRITE(GTIIR, I915_READ(GTIIR)); |
| 2146 | |
| 2147 | I915_WRITE(SDEIMR, 0xffffffff); |
| 2148 | I915_WRITE(SDEIER, 0x0); |
| 2149 | I915_WRITE(SDEIIR, I915_READ(SDEIIR)); |
| 2150 | } |
| 2151 | |
| 2152 | static void i8xx_irq_preinstall(struct drm_device * dev) |
| 2153 | { |
| 2154 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 2155 | int pipe; |
| 2156 | |
| 2157 | atomic_set(&dev_priv->irq_received, 0); |
| 2158 | |
| 2159 | for_each_pipe(pipe) |
| 2160 | I915_WRITE(PIPESTAT(pipe), 0); |
| 2161 | I915_WRITE16(IMR, 0xffff); |
| 2162 | I915_WRITE16(IER, 0x0); |
| 2163 | POSTING_READ16(IER); |
| 2164 | } |
| 2165 | |
| 2166 | static int i8xx_irq_postinstall(struct drm_device *dev) |
| 2167 | { |
| 2168 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 2169 | |
| 2170 | dev_priv->pipestat[0] = 0; |
| 2171 | dev_priv->pipestat[1] = 0; |
| 2172 | |
| 2173 | I915_WRITE16(EMR, |
| 2174 | ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); |
| 2175 | |
| 2176 | /* Unmask the interrupts that we always want on. */ |
| 2177 | dev_priv->irq_mask = |
| 2178 | ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | |
| 2179 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | |
| 2180 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | |
| 2181 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | |
| 2182 | I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); |
| 2183 | I915_WRITE16(IMR, dev_priv->irq_mask); |
| 2184 | |
| 2185 | I915_WRITE16(IER, |
| 2186 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | |
| 2187 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | |
| 2188 | I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | |
| 2189 | I915_USER_INTERRUPT); |
| 2190 | POSTING_READ16(IER); |
| 2191 | |
| 2192 | return 0; |
| 2193 | } |
| 2194 | |
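/* Gen2 interrupt handler: IIR/IMR/IER are only 16 bits wide here. Loop
 * until all non-flip-pending bits in IIR have been serviced, handling
 * render user interrupts, vblanks and page flips along the way.
 */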
| 2195 | static irqreturn_t i8xx_irq_handler(int irq, void *arg) |
| 2196 | { |
| 2197 | struct drm_device *dev = (struct drm_device *) arg; |
| 2198 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 2199 | u16 iir, new_iir; |
| 2200 | u32 pipe_stats[2]; |
| 2201 | unsigned long irqflags; |
| 2202 | int irq_received; |
| 2203 | int pipe; |
| 2204 | u16 flip_mask = |
| 2205 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | |
| 2206 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; |
| 2207 | |
| 2208 | atomic_inc(&dev_priv->irq_received); |
| 2209 | |
| 2210 | iir = I915_READ16(IIR); |
| 2211 | if (iir == 0) |
| 2212 | return IRQ_NONE; |
| 2213 | |
| 2214 | while (iir & ~flip_mask) { |
| 2215 | /* Can't rely on pipestat interrupt bit in iir as it might |
| 2216 | * have been cleared after the pipestat interrupt was received. |
| 2217 | * It doesn't set the bit in iir again, but it still produces |
| 2218 | * interrupts (for non-MSI). |
| 2219 | */ |
| 2220 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
| 2221 | if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) |
| 2222 | i915_handle_error(dev, false); |
| 2223 | |
| 2224 | for_each_pipe(pipe) { |
| 2225 | int reg = PIPESTAT(pipe); |
| 2226 | pipe_stats[pipe] = I915_READ(reg); |
| 2227 | |
| 2228 | /* |
| 2229 | * Clear the PIPE*STAT regs before the IIR |
| 2230 | */ |
| 2231 | if (pipe_stats[pipe] & 0x8000ffff) { |
| 2232 | if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) |
| 2233 | DRM_DEBUG_DRIVER("pipe %c underrun\n", |
| 2234 | pipe_name(pipe)); |
| 2235 | I915_WRITE(reg, pipe_stats[pipe]); |
| 2236 | irq_received = 1; |
| 2237 | } |
| 2238 | } |
| 2239 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
| 2240 | |
| 2241 | I915_WRITE16(IIR, iir & ~flip_mask); |
| 2242 | new_iir = I915_READ16(IIR); /* Flush posted writes */ |
| 2243 | |
| 2244 | i915_update_dri1_breadcrumb(dev); |
| 2245 | |
| 2246 | if (iir & I915_USER_INTERRUPT) |
| 2247 | notify_ring(dev, &dev_priv->ring[RCS]); |
| 2248 | |
| 2249 | if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS && |
| 2250 | drm_handle_vblank(dev, 0)) { |
| 2251 | if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) { |
| 2252 | intel_prepare_page_flip(dev, 0); |
| 2253 | intel_finish_page_flip(dev, 0); |
| 2254 | flip_mask &= ~I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT; |
| 2255 | } |
| 2256 | } |
| 2257 | |
| 2258 | if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS && |
| 2259 | drm_handle_vblank(dev, 1)) { |
| 2260 | if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) { |
| 2261 | intel_prepare_page_flip(dev, 1); |
| 2262 | intel_finish_page_flip(dev, 1); |
| 2263 | flip_mask &= ~I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; |
| 2264 | } |
| 2265 | } |
| 2266 | |
| 2267 | iir = new_iir; |
| 2268 | } |
| 2269 | |
| 2270 | return IRQ_HANDLED; |
| 2271 | } |
| 2272 | |
| 2273 | static void i8xx_irq_uninstall(struct drm_device * dev) |
| 2274 | { |
| 2275 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 2276 | int pipe; |
| 2277 | |
| 2278 | for_each_pipe(pipe) { |
| 2279 | /* Clear enable bits; then clear status bits */ |
| 2280 | I915_WRITE(PIPESTAT(pipe), 0); |
| 2281 | I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); |
| 2282 | } |
| 2283 | I915_WRITE16(IMR, 0xffff); |
| 2284 | I915_WRITE16(IER, 0x0); |
| 2285 | I915_WRITE16(IIR, I915_READ16(IIR)); |
| 2286 | } |
| 2287 | |
| 2288 | static void i915_irq_preinstall(struct drm_device * dev) |
| 2289 | { |
| 2290 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 2291 | int pipe; |
| 2292 | |
| 2293 | atomic_set(&dev_priv->irq_received, 0); |
| 2294 | |
| 2295 | if (I915_HAS_HOTPLUG(dev)) { |
| 2296 | I915_WRITE(PORT_HOTPLUG_EN, 0); |
| 2297 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); |
| 2298 | } |
| 2299 | |
| 2300 | I915_WRITE16(HWSTAM, 0xeffe); |
| 2301 | for_each_pipe(pipe) |
| 2302 | I915_WRITE(PIPESTAT(pipe), 0); |
| 2303 | I915_WRITE(IMR, 0xffffffff); |
| 2304 | I915_WRITE(IER, 0x0); |
| 2305 | POSTING_READ(IER); |
| 2306 | } |
| 2307 | |
| 2308 | static int i915_irq_postinstall(struct drm_device *dev) |
| 2309 | { |
| 2310 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 2311 | u32 enable_mask; |
| 2312 | |
| 2313 | dev_priv->pipestat[0] = 0; |
| 2314 | dev_priv->pipestat[1] = 0; |
| 2315 | |
| 2316 | I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); |
| 2317 | |
| 2318 | /* Unmask the interrupts that we always want on. */ |
| 2319 | dev_priv->irq_mask = |
| 2320 | ~(I915_ASLE_INTERRUPT | |
| 2321 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | |
| 2322 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | |
| 2323 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | |
| 2324 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | |
| 2325 | I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); |
| 2326 | |
| 2327 | enable_mask = |
| 2328 | I915_ASLE_INTERRUPT | |
| 2329 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | |
| 2330 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | |
| 2331 | I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | |
| 2332 | I915_USER_INTERRUPT; |
| 2333 | |
| 2334 | if (I915_HAS_HOTPLUG(dev)) { |
| 2335 | I915_WRITE(PORT_HOTPLUG_EN, 0); |
| 2336 | POSTING_READ(PORT_HOTPLUG_EN); |
| 2337 | |
| 2338 | /* Enable in IER... */ |
| 2339 | enable_mask |= I915_DISPLAY_PORT_INTERRUPT; |
| 2340 | /* and unmask in IMR */ |
| 2341 | dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; |
| 2342 | } |
| 2343 | |
| 2344 | I915_WRITE(IMR, dev_priv->irq_mask); |
| 2345 | I915_WRITE(IER, enable_mask); |
| 2346 | POSTING_READ(IER); |
| 2347 | |
| 2348 | intel_opregion_enable_asle(dev); |
| 2349 | |
| 2350 | return 0; |
| 2351 | } |
| 2352 | |
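/* Enable hotplug detection on platforms that have it (I915_HAS_HOTPLUG),
 * mirroring the supported-connector mask into PORT_HOTPLUG_EN.
 */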
| 2353 | static void i915_hpd_irq_setup(struct drm_device *dev) |
| 2354 | { |
| 2355 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 2356 | u32 hotplug_en; |
| 2357 | |
| 2358 | if (I915_HAS_HOTPLUG(dev)) { |
| 2359 | hotplug_en = I915_READ(PORT_HOTPLUG_EN); |
| 2360 | |
| 2361 | if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) |
| 2362 | hotplug_en |= HDMIB_HOTPLUG_INT_EN; |
| 2363 | if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) |
| 2364 | hotplug_en |= HDMIC_HOTPLUG_INT_EN; |
| 2365 | if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) |
| 2366 | hotplug_en |= HDMID_HOTPLUG_INT_EN; |
| 2367 | if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I915) |
| 2368 | hotplug_en |= SDVOC_HOTPLUG_INT_EN; |
| 2369 | if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I915) |
| 2370 | hotplug_en |= SDVOB_HOTPLUG_INT_EN; |
| 2371 | if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { |
| 2372 | hotplug_en |= CRT_HOTPLUG_INT_EN; |
| 2373 | hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; |
| 2374 | } |
| 2375 | |
| 2376 | /* Ignore TV since it's buggy */ |
| 2377 | |
| 2378 | I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); |
| 2379 | } |
| 2380 | } |
| 2381 | |
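/* Gen3 interrupt handler. Keep looping while IIR has bits other than the
 * flip-pending ones set, servicing hotplug, render, vblank/page-flip and
 * legacy backlight (ASLE) events before re-reading IIR.
 */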
| 2382 | static irqreturn_t i915_irq_handler(int irq, void *arg) |
| 2383 | { |
| 2384 | struct drm_device *dev = (struct drm_device *) arg; |
| 2385 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 2386 | u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; |
| 2387 | unsigned long irqflags; |
| 2388 | u32 flip_mask = |
| 2389 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | |
| 2390 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; |
| 2391 | u32 flip[2] = { |
| 2392 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT, |
| 2393 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |
| 2394 | }; |
| 2395 | int pipe, ret = IRQ_NONE; |
| 2396 | |
| 2397 | atomic_inc(&dev_priv->irq_received); |
| 2398 | |
| 2399 | iir = I915_READ(IIR); |
| 2400 | do { |
| 2401 | bool irq_received = (iir & ~flip_mask) != 0; |
| 2402 | bool blc_event = false; |
| 2403 | |
| 2404 | /* Can't rely on pipestat interrupt bit in iir as it might |
| 2405 | * have been cleared after the pipestat interrupt was received. |
| 2406 | * It doesn't set the bit in iir again, but it still produces |
| 2407 | * interrupts (for non-MSI). |
| 2408 | */ |
| 2409 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
| 2410 | if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) |
| 2411 | i915_handle_error(dev, false); |
| 2412 | |
| 2413 | for_each_pipe(pipe) { |
| 2414 | int reg = PIPESTAT(pipe); |
| 2415 | pipe_stats[pipe] = I915_READ(reg); |
| 2416 | |
| 2417 | /* Clear the PIPE*STAT regs before the IIR */ |
| 2418 | if (pipe_stats[pipe] & 0x8000ffff) { |
| 2419 | if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) |
| 2420 | DRM_DEBUG_DRIVER("pipe %c underrun\n", |
| 2421 | pipe_name(pipe)); |
| 2422 | I915_WRITE(reg, pipe_stats[pipe]); |
| 2423 | irq_received = true; |
| 2424 | } |
| 2425 | } |
| 2426 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
| 2427 | |
| 2428 | if (!irq_received) |
| 2429 | break; |
| 2430 | |
| 2431 | /* Consume port. Then clear IIR or we'll miss events */ |
| 2432 | if ((I915_HAS_HOTPLUG(dev)) && |
| 2433 | (iir & I915_DISPLAY_PORT_INTERRUPT)) { |
| 2434 | u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); |
| 2435 | |
| 2436 | DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", |
| 2437 | hotplug_status); |
| 2438 | if (hotplug_status & dev_priv->hotplug_supported_mask) |
| 2439 | queue_work(dev_priv->wq, |
| 2440 | &dev_priv->hotplug_work); |
| 2441 | |
| 2442 | I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); |
| 2443 | POSTING_READ(PORT_HOTPLUG_STAT); |
| 2444 | } |
| 2445 | |
| 2446 | I915_WRITE(IIR, iir & ~flip_mask); |
| 2447 | new_iir = I915_READ(IIR); /* Flush posted writes */ |
| 2448 | |
| 2449 | if (iir & I915_USER_INTERRUPT) |
| 2450 | notify_ring(dev, &dev_priv->ring[RCS]); |
| 2451 | |
| 2452 | for_each_pipe(pipe) { |
| 2453 | int plane = pipe; |
| 2454 | if (IS_MOBILE(dev)) |
| 2455 | plane = !plane; |
| 2456 | if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && |
| 2457 | drm_handle_vblank(dev, pipe)) { |
| 2458 | if (iir & flip[plane]) { |
| 2459 | intel_prepare_page_flip(dev, plane); |
| 2460 | intel_finish_page_flip(dev, pipe); |
| 2461 | flip_mask &= ~flip[plane]; |
| 2462 | } |
| 2463 | } |
| 2464 | |
| 2465 | if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) |
| 2466 | blc_event = true; |
| 2467 | } |
| 2468 | |
| 2469 | if (blc_event || (iir & I915_ASLE_INTERRUPT)) |
| 2470 | intel_opregion_asle_intr(dev); |
| 2471 | |
| 2472 | /* With MSI, interrupts are only generated when iir |
| 2473 | * transitions from zero to nonzero. If another bit got |
| 2474 | * set while we were handling the existing iir bits, then |
| 2475 | * we would never get another interrupt. |
| 2476 | * |
| 2477 | * This is fine on non-MSI as well, as if we hit this path |
| 2478 | * we avoid exiting the interrupt handler only to generate |
| 2479 | * another one. |
| 2480 | * |
| 2481 | * Note that for MSI this could cause a stray interrupt report |
| 2482 | * if an interrupt landed in the time between writing IIR and |
| 2483 | * the posting read. This should be rare enough to never |
| 2484 | * trigger the 99% of 100,000 interrupts test for disabling |
| 2485 | * stray interrupts. |
| 2486 | */ |
| 2487 | ret = IRQ_HANDLED; |
| 2488 | iir = new_iir; |
| 2489 | } while (iir & ~flip_mask); |
| 2490 | |
| 2491 | i915_update_dri1_breadcrumb(dev); |
| 2492 | |
| 2493 | return ret; |
| 2494 | } |
| 2495 | |
| 2496 | static void i915_irq_uninstall(struct drm_device * dev) |
| 2497 | { |
| 2498 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 2499 | int pipe; |
| 2500 | |
| 2501 | if (I915_HAS_HOTPLUG(dev)) { |
| 2502 | I915_WRITE(PORT_HOTPLUG_EN, 0); |
| 2503 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); |
| 2504 | } |
| 2505 | |
| 2506 | I915_WRITE16(HWSTAM, 0xffff); |
| 2507 | for_each_pipe(pipe) { |
| 2508 | /* Clear enable bits; then clear status bits */ |
| 2509 | I915_WRITE(PIPESTAT(pipe), 0); |
| 2510 | I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); |
| 2511 | } |
| 2512 | I915_WRITE(IMR, 0xffffffff); |
| 2513 | I915_WRITE(IER, 0x0); |
| 2514 | |
| 2515 | I915_WRITE(IIR, I915_READ(IIR)); |
| 2516 | } |
| 2517 | |
| 2518 | static void i965_irq_preinstall(struct drm_device * dev) |
| 2519 | { |
| 2520 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 2521 | int pipe; |
| 2522 | |
| 2523 | atomic_set(&dev_priv->irq_received, 0); |
| 2524 | |
| 2525 | I915_WRITE(PORT_HOTPLUG_EN, 0); |
| 2526 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); |
| 2527 | |
| 2528 | I915_WRITE(HWSTAM, 0xeffe); |
| 2529 | for_each_pipe(pipe) |
| 2530 | I915_WRITE(PIPESTAT(pipe), 0); |
| 2531 | I915_WRITE(IMR, 0xffffffff); |
| 2532 | I915_WRITE(IER, 0x0); |
| 2533 | POSTING_READ(IER); |
| 2534 | } |
| 2535 | |
| 2536 | static int i965_irq_postinstall(struct drm_device *dev) |
| 2537 | { |
| 2538 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 2539 | u32 enable_mask; |
| 2540 | u32 error_mask; |
| 2541 | |
| 2542 | /* Unmask the interrupts that we always want on. */ |
| 2543 | dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | |
| 2544 | I915_DISPLAY_PORT_INTERRUPT | |
| 2545 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | |
| 2546 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | |
| 2547 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | |
| 2548 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | |
| 2549 | I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); |
| 2550 | |
| 2551 | enable_mask = ~dev_priv->irq_mask; |
| 2552 | enable_mask |= I915_USER_INTERRUPT; |
| 2553 | |
| 2554 | if (IS_G4X(dev)) |
| 2555 | enable_mask |= I915_BSD_USER_INTERRUPT; |
| 2556 | |
| 2557 | dev_priv->pipestat[0] = 0; |
| 2558 | dev_priv->pipestat[1] = 0; |
| 2559 | i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); |
| 2560 | |
| 2561 | /* |
| 2562 | * Enable some error detection, note the instruction error mask |
| 2563 | * bit is reserved, so we leave it masked. |
| 2564 | */ |
| 2565 | if (IS_G4X(dev)) { |
| 2566 | error_mask = ~(GM45_ERROR_PAGE_TABLE | |
| 2567 | GM45_ERROR_MEM_PRIV | |
| 2568 | GM45_ERROR_CP_PRIV | |
| 2569 | I915_ERROR_MEMORY_REFRESH); |
| 2570 | } else { |
| 2571 | error_mask = ~(I915_ERROR_PAGE_TABLE | |
| 2572 | I915_ERROR_MEMORY_REFRESH); |
| 2573 | } |
| 2574 | I915_WRITE(EMR, error_mask); |
| 2575 | |
| 2576 | I915_WRITE(IMR, dev_priv->irq_mask); |
| 2577 | I915_WRITE(IER, enable_mask); |
| 2578 | POSTING_READ(IER); |
| 2579 | |
| 2580 | I915_WRITE(PORT_HOTPLUG_EN, 0); |
| 2581 | POSTING_READ(PORT_HOTPLUG_EN); |
| 2582 | |
| 2583 | intel_opregion_enable_asle(dev); |
| 2584 | |
| 2585 | return 0; |
| 2586 | } |
| 2587 | |
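/* Gen4 hotplug setup: same idea as i915_hpd_irq_setup, except that the SDVO
 * status bits differ between G4X and I965 and the CRT activation period is
 * only programmed on G4X.
 */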
| 2588 | static void i965_hpd_irq_setup(struct drm_device *dev) |
| 2589 | { |
| 2590 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 2591 | u32 hotplug_en; |
| 2592 | |
| 2593 | /* Note HDMI and DP share hotplug bits */ |
| 2594 | hotplug_en = 0; |
| 2595 | if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS) |
| 2596 | hotplug_en |= HDMIB_HOTPLUG_INT_EN; |
| 2597 | if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS) |
| 2598 | hotplug_en |= HDMIC_HOTPLUG_INT_EN; |
| 2599 | if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS) |
| 2600 | hotplug_en |= HDMID_HOTPLUG_INT_EN; |
| 2601 | if (IS_G4X(dev)) { |
| 2602 | if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_G4X) |
| 2603 | hotplug_en |= SDVOC_HOTPLUG_INT_EN; |
| 2604 | if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_G4X) |
| 2605 | hotplug_en |= SDVOB_HOTPLUG_INT_EN; |
| 2606 | } else { |
| 2607 | if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS_I965) |
| 2608 | hotplug_en |= SDVOC_HOTPLUG_INT_EN; |
| 2609 | if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS_I965) |
| 2610 | hotplug_en |= SDVOB_HOTPLUG_INT_EN; |
| 2611 | } |
| 2612 | if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS) { |
| 2613 | hotplug_en |= CRT_HOTPLUG_INT_EN; |
| 2614 | |
		/*
		 * Programming the CRT detection parameters tends to
		 * generate a spurious hotplug event about three seconds
		 * later.  So just do it once.
		 */
| 2619 | if (IS_G4X(dev)) |
| 2620 | hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; |
| 2621 | hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; |
| 2622 | } |
| 2623 | |
| 2624 | /* Ignore TV since it's buggy */ |
| 2625 | |
| 2626 | I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); |
| 2627 | } |
| 2628 | |
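/* Gen4/G4X interrupt handler. Like the gen3 handler, but with BSD ring
 * notifications, GMBUS events and the page-flip stall check wired in.
 */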
| 2629 | static irqreturn_t i965_irq_handler(int irq, void *arg) |
| 2630 | { |
| 2631 | struct drm_device *dev = (struct drm_device *) arg; |
| 2632 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 2633 | u32 iir, new_iir; |
| 2634 | u32 pipe_stats[I915_MAX_PIPES]; |
| 2635 | unsigned long irqflags; |
| 2636 | int irq_received; |
| 2637 | int ret = IRQ_NONE, pipe; |
| 2638 | |
| 2639 | atomic_inc(&dev_priv->irq_received); |
| 2640 | |
| 2641 | iir = I915_READ(IIR); |
| 2642 | |
| 2643 | for (;;) { |
| 2644 | bool blc_event = false; |
| 2645 | |
| 2646 | irq_received = iir != 0; |
| 2647 | |
| 2648 | /* Can't rely on pipestat interrupt bit in iir as it might |
| 2649 | * have been cleared after the pipestat interrupt was received. |
| 2650 | * It doesn't set the bit in iir again, but it still produces |
| 2651 | * interrupts (for non-MSI). |
| 2652 | */ |
| 2653 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
| 2654 | if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) |
| 2655 | i915_handle_error(dev, false); |
| 2656 | |
| 2657 | for_each_pipe(pipe) { |
| 2658 | int reg = PIPESTAT(pipe); |
| 2659 | pipe_stats[pipe] = I915_READ(reg); |
| 2660 | |
| 2661 | /* |
| 2662 | * Clear the PIPE*STAT regs before the IIR |
| 2663 | */ |
| 2664 | if (pipe_stats[pipe] & 0x8000ffff) { |
| 2665 | if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) |
| 2666 | DRM_DEBUG_DRIVER("pipe %c underrun\n", |
| 2667 | pipe_name(pipe)); |
| 2668 | I915_WRITE(reg, pipe_stats[pipe]); |
| 2669 | irq_received = 1; |
| 2670 | } |
| 2671 | } |
| 2672 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
| 2673 | |
| 2674 | if (!irq_received) |
| 2675 | break; |
| 2676 | |
| 2677 | ret = IRQ_HANDLED; |
| 2678 | |
| 2679 | /* Consume port. Then clear IIR or we'll miss events */ |
| 2680 | if (iir & I915_DISPLAY_PORT_INTERRUPT) { |
| 2681 | u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); |
| 2682 | |
| 2683 | DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", |
| 2684 | hotplug_status); |
| 2685 | if (hotplug_status & dev_priv->hotplug_supported_mask) |
| 2686 | queue_work(dev_priv->wq, |
| 2687 | &dev_priv->hotplug_work); |
| 2688 | |
| 2689 | I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); |
| 2690 | I915_READ(PORT_HOTPLUG_STAT); |
| 2691 | } |
| 2692 | |
| 2693 | I915_WRITE(IIR, iir); |
| 2694 | new_iir = I915_READ(IIR); /* Flush posted writes */ |
| 2695 | |
| 2696 | if (iir & I915_USER_INTERRUPT) |
| 2697 | notify_ring(dev, &dev_priv->ring[RCS]); |
| 2698 | if (iir & I915_BSD_USER_INTERRUPT) |
| 2699 | notify_ring(dev, &dev_priv->ring[VCS]); |
| 2700 | |
| 2701 | if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) |
| 2702 | intel_prepare_page_flip(dev, 0); |
| 2703 | |
| 2704 | if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT) |
| 2705 | intel_prepare_page_flip(dev, 1); |
| 2706 | |
| 2707 | for_each_pipe(pipe) { |
| 2708 | if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && |
| 2709 | drm_handle_vblank(dev, pipe)) { |
| 2710 | i915_pageflip_stall_check(dev, pipe); |
| 2711 | intel_finish_page_flip(dev, pipe); |
| 2712 | } |
| 2713 | |
| 2714 | if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) |
| 2715 | blc_event = true; |
| 2716 | } |
| 2717 | |
| 2718 | |
| 2719 | if (blc_event || (iir & I915_ASLE_INTERRUPT)) |
| 2720 | intel_opregion_asle_intr(dev); |
| 2721 | |
| 2722 | if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) |
| 2723 | gmbus_irq_handler(dev); |
| 2724 | |
| 2725 | /* With MSI, interrupts are only generated when iir |
| 2726 | * transitions from zero to nonzero. If another bit got |
| 2727 | * set while we were handling the existing iir bits, then |
| 2728 | * we would never get another interrupt. |
| 2729 | * |
| 2730 | * This is fine on non-MSI as well, as if we hit this path |
| 2731 | * we avoid exiting the interrupt handler only to generate |
| 2732 | * another one. |
| 2733 | * |
| 2734 | * Note that for MSI this could cause a stray interrupt report |
| 2735 | * if an interrupt landed in the time between writing IIR and |
| 2736 | * the posting read. This should be rare enough to never |
| 2737 | * trigger the 99% of 100,000 interrupts test for disabling |
| 2738 | * stray interrupts. |
| 2739 | */ |
| 2740 | iir = new_iir; |
| 2741 | } |
| 2742 | |
| 2743 | i915_update_dri1_breadcrumb(dev); |
| 2744 | |
| 2745 | return ret; |
| 2746 | } |
| 2747 | |
| 2748 | static void i965_irq_uninstall(struct drm_device * dev) |
| 2749 | { |
| 2750 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 2751 | int pipe; |
| 2752 | |
| 2753 | if (!dev_priv) |
| 2754 | return; |
| 2755 | |
| 2756 | I915_WRITE(PORT_HOTPLUG_EN, 0); |
| 2757 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); |
| 2758 | |
| 2759 | I915_WRITE(HWSTAM, 0xffffffff); |
| 2760 | for_each_pipe(pipe) |
| 2761 | I915_WRITE(PIPESTAT(pipe), 0); |
| 2762 | I915_WRITE(IMR, 0xffffffff); |
| 2763 | I915_WRITE(IER, 0x0); |
| 2764 | |
| 2765 | for_each_pipe(pipe) |
| 2766 | I915_WRITE(PIPESTAT(pipe), |
| 2767 | I915_READ(PIPESTAT(pipe)) & 0x8000ffff); |
| 2768 | I915_WRITE(IIR, I915_READ(IIR)); |
| 2769 | } |
| 2770 | |
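/* One-time setup of the interrupt-related work items, the hangcheck timer
 * and the per-platform irq, vblank and hotplug vfuncs.
 */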
| 2771 | void intel_irq_init(struct drm_device *dev) |
| 2772 | { |
| 2773 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 2774 | |
| 2775 | INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); |
| 2776 | INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func); |
| 2777 | INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); |
| 2778 | INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); |
| 2779 | |
| 2780 | setup_timer(&dev_priv->gpu_error.hangcheck_timer, |
| 2781 | i915_hangcheck_elapsed, |
| 2782 | (unsigned long) dev); |
| 2783 | |
| 2784 | pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); |
| 2785 | |
| 2786 | dev->driver->get_vblank_counter = i915_get_vblank_counter; |
| 2787 | dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ |
| 2788 | if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { |
| 2789 | dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ |
| 2790 | dev->driver->get_vblank_counter = gm45_get_vblank_counter; |
| 2791 | } |
| 2792 | |
| 2793 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
| 2794 | dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; |
| 2795 | else |
| 2796 | dev->driver->get_vblank_timestamp = NULL; |
| 2797 | dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; |
| 2798 | |
| 2799 | if (IS_VALLEYVIEW(dev)) { |
| 2800 | dev->driver->irq_handler = valleyview_irq_handler; |
| 2801 | dev->driver->irq_preinstall = valleyview_irq_preinstall; |
| 2802 | dev->driver->irq_postinstall = valleyview_irq_postinstall; |
| 2803 | dev->driver->irq_uninstall = valleyview_irq_uninstall; |
| 2804 | dev->driver->enable_vblank = valleyview_enable_vblank; |
| 2805 | dev->driver->disable_vblank = valleyview_disable_vblank; |
| 2806 | dev_priv->display.hpd_irq_setup = valleyview_hpd_irq_setup; |
| 2807 | } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { |
| 2808 | /* Share pre & uninstall handlers with ILK/SNB */ |
| 2809 | dev->driver->irq_handler = ivybridge_irq_handler; |
| 2810 | dev->driver->irq_preinstall = ironlake_irq_preinstall; |
| 2811 | dev->driver->irq_postinstall = ivybridge_irq_postinstall; |
| 2812 | dev->driver->irq_uninstall = ironlake_irq_uninstall; |
| 2813 | dev->driver->enable_vblank = ivybridge_enable_vblank; |
| 2814 | dev->driver->disable_vblank = ivybridge_disable_vblank; |
| 2815 | } else if (HAS_PCH_SPLIT(dev)) { |
| 2816 | dev->driver->irq_handler = ironlake_irq_handler; |
| 2817 | dev->driver->irq_preinstall = ironlake_irq_preinstall; |
| 2818 | dev->driver->irq_postinstall = ironlake_irq_postinstall; |
| 2819 | dev->driver->irq_uninstall = ironlake_irq_uninstall; |
| 2820 | dev->driver->enable_vblank = ironlake_enable_vblank; |
| 2821 | dev->driver->disable_vblank = ironlake_disable_vblank; |
| 2822 | } else { |
| 2823 | if (INTEL_INFO(dev)->gen == 2) { |
| 2824 | dev->driver->irq_preinstall = i8xx_irq_preinstall; |
| 2825 | dev->driver->irq_postinstall = i8xx_irq_postinstall; |
| 2826 | dev->driver->irq_handler = i8xx_irq_handler; |
| 2827 | dev->driver->irq_uninstall = i8xx_irq_uninstall; |
| 2828 | } else if (INTEL_INFO(dev)->gen == 3) { |
| 2829 | dev->driver->irq_preinstall = i915_irq_preinstall; |
| 2830 | dev->driver->irq_postinstall = i915_irq_postinstall; |
| 2831 | dev->driver->irq_uninstall = i915_irq_uninstall; |
| 2832 | dev->driver->irq_handler = i915_irq_handler; |
| 2833 | dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; |
| 2834 | } else { |
| 2835 | dev->driver->irq_preinstall = i965_irq_preinstall; |
| 2836 | dev->driver->irq_postinstall = i965_irq_postinstall; |
| 2837 | dev->driver->irq_uninstall = i965_irq_uninstall; |
| 2838 | dev->driver->irq_handler = i965_irq_handler; |
| 2839 | dev_priv->display.hpd_irq_setup = i965_hpd_irq_setup; |
| 2840 | } |
| 2841 | dev->driver->enable_vblank = i915_enable_vblank; |
| 2842 | dev->driver->disable_vblank = i915_disable_vblank; |
| 2843 | } |
| 2844 | } |
| 2845 | |
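/* Late hotplug interrupt setup; simply forwards to the platform specific
 * hpd_irq_setup hook, if one was installed by intel_irq_init().
 */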
| 2846 | void intel_hpd_init(struct drm_device *dev) |
| 2847 | { |
| 2848 | struct drm_i915_private *dev_priv = dev->dev_private; |
| 2849 | |
| 2850 | if (dev_priv->display.hpd_irq_setup) |
| 2851 | dev_priv->display.hpd_irq_setup(dev); |
| 2852 | } |