/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "intel_drv.h"

#define MAX_NOPID ((u32)~0)

/**
 * Interrupts that are always left unmasked.
 *
 * Since pipe events are edge-triggered from the PIPESTAT register to IIR,
 * we leave them always unmasked in IMR and then control enabling them through
 * PIPESTAT alone.
 */
#define I915_INTERRUPT_ENABLE_FIX (I915_ASLE_INTERRUPT |		 \
				   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
				   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)

/** Interrupts that we mask and unmask at runtime. */
#define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT)

/** These are all of the interrupts used by the driver */
#define I915_INTERRUPT_ENABLE_MASK (I915_INTERRUPT_ENABLE_FIX | \
				    I915_INTERRUPT_ENABLE_VAR)

#define I915_PIPE_VBLANK_STATUS	(PIPE_START_VBLANK_INTERRUPT_STATUS |\
				 PIPE_VBLANK_INTERRUPT_STATUS)

#define I915_PIPE_VBLANK_ENABLE	(PIPE_START_VBLANK_INTERRUPT_ENABLE |\
				 PIPE_VBLANK_INTERRUPT_ENABLE)

#define DRM_I915_VBLANK_PIPE_ALL	(DRM_I915_VBLANK_PIPE_A | \
					 DRM_I915_VBLANK_PIPE_B)

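/*
 * Unmask the given interrupt bits in IMR so they can reach IIR; the posting
 * read of IMR flushes the write before the caller proceeds.
 */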
void
i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask_reg & mask) != 0) {
		dev_priv->irq_mask_reg &= ~mask;
		I915_WRITE(IMR, dev_priv->irq_mask_reg);
		(void) I915_READ(IMR);
	}
}

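/* Mask the given interrupt bits in IMR so they no longer raise interrupts. */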
static inline void
i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	if ((dev_priv->irq_mask_reg & mask) != mask) {
		dev_priv->irq_mask_reg |= mask;
		I915_WRITE(IMR, dev_priv->irq_mask_reg);
		(void) I915_READ(IMR);
	}
}

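/* Return the PIPESTAT register offset for the given pipe index. */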
static inline u32
i915_pipestat(int pipe)
{
	if (pipe == 0)
		return PIPEASTAT;
	if (pipe == 1)
		return PIPEBSTAT;
	BUG();
}

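/*
 * Set interrupt enable bits in the cached PIPESTAT value and write it out.
 * The enable bits live in the upper half of the register and their status
 * bits sit 16 bits lower, so (mask >> 16) also clears any pending status.
 */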
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != mask) {
		u32 reg = i915_pipestat(pipe);

		dev_priv->pipestat[pipe] |= mask;
		/* Enable the interrupt, clear any pending status */
		I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
		(void) I915_READ(reg);
	}
}

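/* Clear interrupt enable bits in the cached PIPESTAT value and write it back. */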
void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	if ((dev_priv->pipestat[pipe] & mask) != 0) {
		u32 reg = i915_pipestat(pipe);

		dev_priv->pipestat[pipe] &= ~mask;
		I915_WRITE(reg, dev_priv->pipestat[pipe]);
		(void) I915_READ(reg);
	}
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;

	if (I915_READ(pipeconf) & PIPEACONF_ENABLE)
		return 1;

	return 0;
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low, count;

	high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
	low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_ERROR("trying to get vblank count for disabled pipe %d\n", pipe);
		return 0;
	}

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
			 PIPE_FRAME_HIGH_SHIFT);
		low = ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
		       PIPE_FRAME_LOW_SHIFT);
		high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
			 PIPE_FRAME_HIGH_SHIFT);
	} while (high1 != high2);

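	/*
	 * The frame counter is 24 bits wide: 16 bits from the high register
	 * and the low 8 bits from the frame/pixel register.
	 */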
	count = (high1 << 8) | low;

	return count;
}

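/*
 * GM45-style hardware exposes a dedicated frame counter register per pipe,
 * so the count can be read directly instead of being assembled from two
 * registers.
 */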
u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = pipe ? PIPEB_FRMCOUNT_GM45 : PIPEA_FRMCOUNT_GM45;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_ERROR("trying to get vblank count for disabled pipe %d\n", pipe);
		return 0;
	}

	return I915_READ(reg);
}

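/*
 * Main interrupt handler.  Loops until no new IIR or PIPESTAT bits are seen:
 * pipe status is read and cleared before IIR so that edge-triggered pipe
 * events are not lost, and then the sources recorded in the current IIR
 * value are handled.
 */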
irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv;
	u32 iir, new_iir;
	u32 pipea_stats, pipeb_stats;
	u32 vblank_status;
	u32 vblank_enable;
	int vblank = 0;
	unsigned long irqflags;
	int irq_received;
	int ret = IRQ_NONE;

	atomic_inc(&dev_priv->irq_received);

	iir = I915_READ(IIR);

	if (IS_I965G(dev)) {
		vblank_status = I915_START_VBLANK_INTERRUPT_STATUS;
		vblank_enable = PIPE_START_VBLANK_INTERRUPT_ENABLE;
	} else {
		vblank_status = I915_VBLANK_INTERRUPT_STATUS;
		vblank_enable = I915_VBLANK_INTERRUPT_ENABLE;
	}

	for (;;) {
		irq_received = iir != 0;

		/* Can't rely on pipestat interrupt bit in iir as it might
		 * have been cleared after the pipestat interrupt was received.
		 * It doesn't set the bit in iir again, but it still produces
		 * interrupts (for non-MSI).
		 */
		spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
		pipea_stats = I915_READ(PIPEASTAT);
		pipeb_stats = I915_READ(PIPEBSTAT);

		/*
		 * Clear the PIPE(A|B)STAT regs before the IIR
		 */
		if (pipea_stats & 0x8000ffff) {
			I915_WRITE(PIPEASTAT, pipea_stats);
			irq_received = 1;
		}

		if (pipeb_stats & 0x8000ffff) {
			I915_WRITE(PIPEBSTAT, pipeb_stats);
			irq_received = 1;
		}
		spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);

		if (!irq_received)
			break;

		ret = IRQ_HANDLED;

		I915_WRITE(IIR, iir);
		new_iir = I915_READ(IIR); /* Flush posted writes */

		if (dev->primary->master) {
			master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->last_dispatch =
					READ_BREADCRUMB(dev_priv);
		}

		if (iir & I915_USER_INTERRUPT) {
			dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
			DRM_WAKEUP(&dev_priv->irq_queue);
		}

		if (pipea_stats & vblank_status) {
			vblank++;
			drm_handle_vblank(dev, 0);
		}

		if (pipeb_stats & vblank_status) {
			vblank++;
			drm_handle_vblank(dev, 1);
		}

		if ((pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
		    (iir & I915_ASLE_INTERRUPT))
			opregion_asle_intr(dev);

		/* With MSI, interrupts are only generated when iir
		 * transitions from zero to nonzero. If another bit got
		 * set while we were handling the existing iir bits, then
		 * we would never get another interrupt.
		 *
		 * This is fine on non-MSI as well, as if we hit this path
		 * we avoid exiting the interrupt handler only to generate
		 * another one.
		 *
		 * Note that for MSI this could cause a stray interrupt report
		 * if an interrupt landed in the time between writing IIR and
		 * the posting read. This should be rare enough to never
		 * trigger the 99% of 100,000 interrupts test for disabling
		 * stray interrupts.
		 */
		iir = new_iir;
	}

	return ret;
}

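/*
 * Advance the breadcrumb counter and emit a MI_STORE_DWORD_INDEX of the new
 * value followed by MI_USER_INTERRUPT into the ring, so waiters can tell
 * when the GPU has passed this point.  Returns the breadcrumb value to
 * wait on.
 */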
static int i915_emit_irq(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	RING_LOCALS;

	i915_kernel_lost_context(dev);

	DRM_DEBUG("\n");

	dev_priv->counter++;
	if (dev_priv->counter > 0x7FFFFFFFUL)
		dev_priv->counter = 1;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->counter;

	BEGIN_LP_RING(4);
	OUT_RING(MI_STORE_DWORD_INDEX);
	OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	OUT_RING(dev_priv->counter);
	OUT_RING(MI_USER_INTERRUPT);
	ADVANCE_LP_RING();

	return dev_priv->counter;
}

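/*
 * Take a reference on the user interrupt; unmask it in IMR when the first
 * reference is taken.
 */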
void i915_user_irq_get(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
	if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1))
		i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}

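/*
 * Drop a reference on the user interrupt; mask it again when the last
 * reference goes away.
 */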
void i915_user_irq_put(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
	BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
	if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0))
		i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}

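/*
 * Wait (for up to three seconds) until the breadcrumb written by the GPU
 * reaches irq_nr, using the user interrupt to wake the wait queue.
 */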
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret = 0;

	DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
		  READ_BREADCRUMB(dev_priv));

	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
		return 0;
	}

	if (master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

	i915_user_irq_get(dev);
	DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
		    READ_BREADCRUMB(dev_priv) >= irq_nr);
	i915_user_irq_put(dev);

	if (ret == -EBUSY) {
		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
			  READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
	}

	return ret;
}

/* Needs the lock as it touches the ring.
 */
int i915_irq_emit(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_emit_t *emit = data;
	int result;

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}
	mutex_lock(&dev->struct_mutex);
	result = i915_emit_irq(dev);
	mutex_unlock(&dev->struct_mutex);

	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}

	return 0;
}

/* Doesn't need the hardware lock.
 */
int i915_irq_wait(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_wait_t *irqwait = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	return i915_wait_irq(dev, irqwait->irq_seq);
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
int i915_enable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;
	int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
	u32 pipeconf;

	pipeconf = I915_READ(pipeconf_reg);
	if (!(pipeconf & PIPEACONF_ENABLE))
		return -EINVAL;

	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
	if (IS_I965G(dev))
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
	else
		i915_enable_pipestat(dev_priv, pipe,
				     PIPE_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
	return 0;
}

/* Called from drm generic code, passed 'crtc' which
 * we use as a pipe index
 */
void i915_disable_vblank(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
	i915_disable_pipestat(dev_priv, pipe,
			      PIPE_VBLANK_INTERRUPT_ENABLE |
			      PIPE_START_VBLANK_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}

void i915_enable_interrupt (struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	opregion_enable_asle(dev);
	dev_priv->irq_enabled = 1;
}


/* Set the vblank monitor pipe
 */
int i915_vblank_pipe_set(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	return 0;
}

int i915_vblank_pipe_get(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_vblank_pipe_t *pipe = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

	return 0;
}

/**
 * Schedule buffer swap at given vertical blank.
 */
int i915_vblank_swap(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	/* The delayed swap mechanism was fundamentally racy, and has been
	 * removed. The model was that the client requested a delayed flip/swap
	 * from the kernel, then waited for vblank before continuing to perform
	 * rendering. The problem was that the kernel might wake the client
	 * up before it dispatched the vblank swap (since the lock has to be
	 * held while touching the ringbuffer), in which case the client would
	 * clear and start the next frame before the swap occurred, and
	 * flicker would occur in addition to likely missing the vblank.
	 *
	 * In the absence of this ioctl, userland falls back to a correct path
	 * of waiting for a vblank, then dispatching the swap on its own.
	 * Context switching to userland and back is plenty fast enough for
	 * meeting the requirements of vblank swapping.
	 */
	return -EINVAL;
}

/* drm_dma.h hooks
 */
void i915_driver_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	atomic_set(&dev_priv->irq_received, 0);

	I915_WRITE(HWSTAM, 0xeffe);
	I915_WRITE(PIPEASTAT, 0);
	I915_WRITE(PIPEBSTAT, 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);
	(void) I915_READ(IER);
}

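/*
 * Called once the IRQ handler is installed: unmask the fixed interrupt
 * sources, clear any stale pipe and IIR status, and enable the interrupts
 * the driver uses.
 */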
int i915_driver_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */

	/* Unmask the interrupts that we always want on. */
	dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX;

	dev_priv->pipestat[0] = 0;
	dev_priv->pipestat[1] = 0;

	/* Disable pipe interrupt enables, clear pending pipe status */
	I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
	I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
	/* Clear pending interrupt status */
	I915_WRITE(IIR, I915_READ(IIR));

	I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK);
	I915_WRITE(IMR, dev_priv->irq_mask_reg);
	(void) I915_READ(IER);

	opregion_enable_asle(dev);
	DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);

	return 0;
}

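/*
 * Mask and clear everything on teardown so the hardware raises no further
 * interrupts once the handler is removed.
 */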
void i915_driver_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!dev_priv)
		return;

	dev_priv->vblank_pipe = 0;

	I915_WRITE(HWSTAM, 0xffffffff);
	I915_WRITE(PIPEASTAT, 0);
	I915_WRITE(PIPEBSTAT, 0);
	I915_WRITE(IMR, 0xffffffff);
	I915_WRITE(IER, 0x0);

	I915_WRITE(PIPEASTAT, I915_READ(PIPEASTAT) & 0x8000ffff);
	I915_WRITE(PIPEBSTAT, I915_READ(PIPEBSTAT) & 0x8000ffff);
	I915_WRITE(IIR, I915_READ(IIR));
}