[IA64] update sn2_defconfig
[deliverable/linux.git] / drivers / char / drm / i915_irq.c
1 /* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
2 */
3 /*
4 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
14 *
15 * The above copyright notice and this permission notice (including the
16 * next paragraph) shall be included in all copies or substantial portions
17 * of the Software.
18 *
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
23 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 *
27 */
28
29 #include "drmP.h"
30 #include "drm.h"
31 #include "i915_drm.h"
32 #include "i915_drv.h"
33
34 #define USER_INT_FLAG (1<<1)
35 #define VSYNC_PIPEB_FLAG (1<<5)
36 #define VSYNC_PIPEA_FLAG (1<<7)
37
38 #define MAX_NOPID ((u32)~0)
39
40 /**
41 * Emit blits for scheduled buffer swaps.
42 *
43 * This function will be called with the HW lock held.
44 */
45 static void i915_vblank_tasklet(struct drm_device *dev)
46 {
47 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
48 unsigned long irqflags;
49 struct list_head *list, *tmp, hits, *hit;
50 int nhits, nrects, slice[2], upper[2], lower[2], i;
51 unsigned counter[2] = { atomic_read(&dev->vbl_received),
52 atomic_read(&dev->vbl_received2) };
53 struct drm_drawable_info *drw;
54 drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
55 u32 cpp = dev_priv->cpp;
56 u32 cmd = (cpp == 4) ? (XY_SRC_COPY_BLT_CMD |
57 XY_SRC_COPY_BLT_WRITE_ALPHA |
58 XY_SRC_COPY_BLT_WRITE_RGB)
59 : XY_SRC_COPY_BLT_CMD;
60 u32 pitchropcpp = (sarea_priv->pitch * cpp) | (0xcc << 16) |
61 (cpp << 23) | (1 << 24);
62 RING_LOCALS;
63
64 DRM_DEBUG("\n");
65
66 INIT_LIST_HEAD(&hits);
67
68 nhits = nrects = 0;
69
70 spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
71
72 /* Find buffer swaps scheduled for this vertical blank */
73 list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
74 drm_i915_vbl_swap_t *vbl_swap =
75 list_entry(list, drm_i915_vbl_swap_t, head);
76
77 if ((counter[vbl_swap->pipe] - vbl_swap->sequence) > (1<<23))
78 continue;
79
80 list_del(list);
81 dev_priv->swaps_pending--;
82
83 spin_unlock(&dev_priv->swaps_lock);
84 spin_lock(&dev->drw_lock);
85
86 drw = drm_get_drawable_info(dev, vbl_swap->drw_id);
87
88 if (!drw) {
89 spin_unlock(&dev->drw_lock);
90 drm_free(vbl_swap, sizeof(*vbl_swap), DRM_MEM_DRIVER);
91 spin_lock(&dev_priv->swaps_lock);
92 continue;
93 }
94
95 list_for_each(hit, &hits) {
96 drm_i915_vbl_swap_t *swap_cmp =
97 list_entry(hit, drm_i915_vbl_swap_t, head);
98 struct drm_drawable_info *drw_cmp =
99 drm_get_drawable_info(dev, swap_cmp->drw_id);
100
101 if (drw_cmp &&
102 drw_cmp->rects[0].y1 > drw->rects[0].y1) {
103 list_add_tail(list, hit);
104 break;
105 }
106 }
107
108 spin_unlock(&dev->drw_lock);
109
110 /* List of hits was empty, or we reached the end of it */
111 if (hit == &hits)
112 list_add_tail(list, hits.prev);
113
114 nhits++;
115
116 spin_lock(&dev_priv->swaps_lock);
117 }
118
119 if (nhits == 0) {
120 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
121 return;
122 }
123
124 spin_unlock(&dev_priv->swaps_lock);
125
126 i915_kernel_lost_context(dev);
127
128 BEGIN_LP_RING(6);
129
130 OUT_RING(GFX_OP_DRAWRECT_INFO);
131 OUT_RING(0);
132 OUT_RING(0);
133 OUT_RING(sarea_priv->width | sarea_priv->height << 16);
134 OUT_RING(sarea_priv->width | sarea_priv->height << 16);
135 OUT_RING(0);
136
137 ADVANCE_LP_RING();
138
139 sarea_priv->ctxOwner = DRM_KERNEL_CONTEXT;
140
141 upper[0] = upper[1] = 0;
142 slice[0] = max(sarea_priv->pipeA_h / nhits, 1);
143 slice[1] = max(sarea_priv->pipeB_h / nhits, 1);
144 lower[0] = sarea_priv->pipeA_y + slice[0];
145 lower[1] = sarea_priv->pipeB_y + slice[0];
146
147 spin_lock(&dev->drw_lock);
148
149 /* Emit blits for buffer swaps, partitioning both outputs into as many
150 * slices as there are buffer swaps scheduled in order to avoid tearing
151 * (based on the assumption that a single buffer swap would always
152 * complete before scanout starts).
153 */
154 for (i = 0; i++ < nhits;
155 upper[0] = lower[0], lower[0] += slice[0],
156 upper[1] = lower[1], lower[1] += slice[1]) {
157 if (i == nhits)
158 lower[0] = lower[1] = sarea_priv->height;
159
160 list_for_each(hit, &hits) {
161 drm_i915_vbl_swap_t *swap_hit =
162 list_entry(hit, drm_i915_vbl_swap_t, head);
163 struct drm_clip_rect *rect;
164 int num_rects, pipe;
165 unsigned short top, bottom;
166
167 drw = drm_get_drawable_info(dev, swap_hit->drw_id);
168
169 if (!drw)
170 continue;
171
172 rect = drw->rects;
173 pipe = swap_hit->pipe;
174 top = upper[pipe];
175 bottom = lower[pipe];
176
177 for (num_rects = drw->num_rects; num_rects--; rect++) {
178 int y1 = max(rect->y1, top);
179 int y2 = min(rect->y2, bottom);
180
181 if (y1 >= y2)
182 continue;
183
184 BEGIN_LP_RING(8);
185
186 OUT_RING(cmd);
187 OUT_RING(pitchropcpp);
188 OUT_RING((y1 << 16) | rect->x1);
189 OUT_RING((y2 << 16) | rect->x2);
190 OUT_RING(sarea_priv->front_offset);
191 OUT_RING((y1 << 16) | rect->x1);
192 OUT_RING(pitchropcpp & 0xffff);
193 OUT_RING(sarea_priv->back_offset);
194
195 ADVANCE_LP_RING();
196 }
197 }
198 }
199
200 spin_unlock_irqrestore(&dev->drw_lock, irqflags);
201
202 list_for_each_safe(hit, tmp, &hits) {
203 drm_i915_vbl_swap_t *swap_hit =
204 list_entry(hit, drm_i915_vbl_swap_t, head);
205
206 list_del(hit);
207
208 drm_free(swap_hit, sizeof(*swap_hit), DRM_MEM_DRIVER);
209 }
210 }
211
/* Top-level interrupt handler: acknowledges and dispatches the user
 * interrupt and pipe A/B vblank events handled by this driver.
 */
irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u16 temp;
	u32 pipea_stats, pipeb_stats;

	pipea_stats = I915_READ(I915REG_PIPEASTAT);
	pipeb_stats = I915_READ(I915REG_PIPEBSTAT);

	temp = I915_READ16(I915REG_INT_IDENTITY_R);

	/* Keep only the interrupt sources this driver handles. */
	temp &= (USER_INT_FLAG | VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG);

	DRM_DEBUG("%s flag=%08x\n", __FUNCTION__, temp);

	/* None of our sources fired: not our interrupt. */
	if (temp == 0)
		return IRQ_NONE;

	/* Ack the handled bits; the read-back flushes the posted write
	 * before the memory barrier. */
	I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
	(void) I915_READ16(I915REG_INT_IDENTITY_R);
	DRM_READMEMORYBARRIER();

	dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

	if (temp & USER_INT_FLAG)
		DRM_WAKEUP(&dev_priv->irq_queue);

	if (temp & (VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG)) {
		int vblank_pipe = dev_priv->vblank_pipe;

		/* With both pipes enabled each feeds its own counter;
		 * with a single enabled pipe, its events feed the
		 * primary counter. */
		if ((vblank_pipe &
		     (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B))
		    == (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B)) {
			if (temp & VSYNC_PIPEA_FLAG)
				atomic_inc(&dev->vbl_received);
			if (temp & VSYNC_PIPEB_FLAG)
				atomic_inc(&dev->vbl_received2);
		} else if (((temp & VSYNC_PIPEA_FLAG) &&
			    (vblank_pipe & DRM_I915_VBLANK_PIPE_A)) ||
			   ((temp & VSYNC_PIPEB_FLAG) &&
			    (vblank_pipe & DRM_I915_VBLANK_PIPE_B)))
			atomic_inc(&dev->vbl_received);

		DRM_WAKEUP(&dev->vbl_queue);
		drm_vbl_send_signals(dev);

		/* Run scheduled buffer swaps via the locked tasklet. */
		if (dev_priv->swaps_pending > 0)
			drm_locked_tasklet(dev, i915_vblank_tasklet);
		/* Clear the vblank status bits while keeping vblank
		 * interrupts enabled in the pipe status registers. */
		I915_WRITE(I915REG_PIPEASTAT,
			   pipea_stats|I915_VBLANK_INTERRUPT_ENABLE|
			   I915_VBLANK_CLEAR);
		I915_WRITE(I915REG_PIPEBSTAT,
			   pipeb_stats|I915_VBLANK_INTERRUPT_ENABLE|
			   I915_VBLANK_CLEAR);
	}

	return IRQ_HANDLED;
}
271
272 static int i915_emit_irq(struct drm_device * dev)
273 {
274 drm_i915_private_t *dev_priv = dev->dev_private;
275 RING_LOCALS;
276
277 i915_kernel_lost_context(dev);
278
279 DRM_DEBUG("%s\n", __FUNCTION__);
280
281 dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;
282
283 if (dev_priv->counter > 0x7FFFFFFFUL)
284 dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;
285
286 BEGIN_LP_RING(6);
287 OUT_RING(CMD_STORE_DWORD_IDX);
288 OUT_RING(20);
289 OUT_RING(dev_priv->counter);
290 OUT_RING(0);
291 OUT_RING(0);
292 OUT_RING(GFX_OP_USER_INTERRUPT);
293 ADVANCE_LP_RING();
294
295 return dev_priv->counter;
296 }
297
/* Sleep (up to 3 seconds) until the breadcrumb written by the ring
 * reaches irq_nr, i.e. the matching i915_emit_irq() request retired.
 * Returns 0 on success or the DRM_WAIT_ON error code.
 */
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int ret = 0;

	DRM_DEBUG("%s irq_nr=%d breadcrumb=%d\n", __FUNCTION__, irq_nr,
		  READ_BREADCRUMB(dev_priv));

	/* Already retired -- nothing to wait for. */
	if (READ_BREADCRUMB(dev_priv) >= irq_nr)
		return 0;

	/* Let userspace performance monitoring see that we waited. */
	dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

	DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
		    READ_BREADCRUMB(dev_priv) >= irq_nr);

	if (ret == DRM_ERR(EBUSY)) {
		DRM_ERROR("%s: EBUSY -- rec: %d emitted: %d\n",
			  __FUNCTION__,
			  READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
	}

	dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
	return ret;
}
323
/* Common implementation for the vblank wait entry points: sleep (up to
 * 3 seconds) until the given vblank counter passes *sequence, then
 * return the current counter value through *sequence.
 */
static int i915_driver_vblank_do_wait(struct drm_device *dev, unsigned int *sequence,
				      atomic_t *counter)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned int cur_vblank;
	int ret = 0;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}

	/* Wraparound-safe comparison: satisfied once the counter has
	 * reached (or passed) the requested sequence. */
	DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
		    (((cur_vblank = atomic_read(counter))
		      - *sequence) <= (1<<23)));

	*sequence = cur_vblank;

	return ret;
}
344
345
/* Wait for vblank on the primary counter (pipe A, or the single
 * enabled pipe). */
int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence)
{
	return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received);
}
350
/* Wait for vblank on the secondary counter (pipe B when both pipes
 * are enabled). */
int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence)
{
	return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received2);
}
355
/* Needs the lock as it touches the ring.
 *
 * Ioctl: emit a user interrupt into the ring and copy the resulting
 * sequence number back to userspace for a later irq_wait.
 */
int i915_irq_emit(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_emit_t emit;
	int result;

	LOCK_TEST_WITH_RETURN(dev, filp);

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL(emit, (drm_i915_irq_emit_t __user *) data,
				 sizeof(emit));

	result = i915_emit_irq(dev);

	/* Hand the new sequence number back through the user pointer. */
	if (DRM_COPY_TO_USER(emit.irq_seq, &result, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return DRM_ERR(EFAULT);
	}

	return 0;
}
384
/* Doesn't need the hardware lock.
 *
 * Ioctl: block until the sequence previously returned by
 * i915_irq_emit has been retired by the hardware.
 */
int i915_irq_wait(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_wait_t irqwait;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL(irqwait, (drm_i915_irq_wait_t __user *) data,
				 sizeof(irqwait));

	return i915_wait_irq(dev, irqwait.irq_seq);
}
403
404 static void i915_enable_interrupt (struct drm_device *dev)
405 {
406 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
407 u16 flag;
408
409 flag = 0;
410 if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A)
411 flag |= VSYNC_PIPEA_FLAG;
412 if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)
413 flag |= VSYNC_PIPEB_FLAG;
414
415 I915_WRITE16(I915REG_INT_ENABLE_R, USER_INT_FLAG | flag);
416 }
417
/* Set the vblank monitor pipe
 *
 * Ioctl: select which pipe(s) generate vblank interrupts and
 * reprogram the interrupt enable register accordingly.
 */
int i915_vblank_pipe_set(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_vblank_pipe_t pipe;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}

	DRM_COPY_FROM_USER_IOCTL(pipe, (drm_i915_vblank_pipe_t __user *) data,
				 sizeof(pipe));

	/* Reject any bits beyond the two known pipes. */
	if (pipe.pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) {
		DRM_ERROR("%s called with invalid pipe 0x%x\n",
			  __FUNCTION__, pipe.pipe);
		return DRM_ERR(EINVAL);
	}

	dev_priv->vblank_pipe = pipe.pipe;

	i915_enable_interrupt (dev);

	return 0;
}
446
/* Ioctl: report which pipes currently have vblank interrupts enabled,
 * read back from the interrupt enable register.
 */
int i915_vblank_pipe_get(DRM_IOCTL_ARGS)
{
	DRM_DEVICE;
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_vblank_pipe_t pipe;
	u16 flag;

	if (!dev_priv) {
		DRM_ERROR("%s called with no initialization\n", __FUNCTION__);
		return DRM_ERR(EINVAL);
	}

	/* NOTE(review): 32-bit read truncated into a u16; the rest of the
	 * file accesses this register with I915_READ16/I915_WRITE16 --
	 * confirm whether I915_READ16 was intended here. The vblank bits
	 * tested below fit in 16 bits either way. */
	flag = I915_READ(I915REG_INT_ENABLE_R);
	pipe.pipe = 0;
	if (flag & VSYNC_PIPEA_FLAG)
		pipe.pipe |= DRM_I915_VBLANK_PIPE_A;
	if (flag & VSYNC_PIPEB_FLAG)
		pipe.pipe |= DRM_I915_VBLANK_PIPE_B;
	DRM_COPY_TO_USER_IOCTL((drm_i915_vblank_pipe_t __user *) data, pipe,
			       sizeof(pipe));
	return 0;
}
469
470 /**
471 * Schedule buffer swap at given vertical blank.
472 */
473 int i915_vblank_swap(DRM_IOCTL_ARGS)
474 {
475 DRM_DEVICE;
476 drm_i915_private_t *dev_priv = dev->dev_private;
477 drm_i915_vblank_swap_t swap;
478 drm_i915_vbl_swap_t *vbl_swap;
479 unsigned int pipe, seqtype, curseq;
480 unsigned long irqflags;
481 struct list_head *list;
482
483 if (!dev_priv) {
484 DRM_ERROR("%s called with no initialization\n", __func__);
485 return DRM_ERR(EINVAL);
486 }
487
488 if (dev_priv->sarea_priv->rotation) {
489 DRM_DEBUG("Rotation not supported\n");
490 return DRM_ERR(EINVAL);
491 }
492
493 DRM_COPY_FROM_USER_IOCTL(swap, (drm_i915_vblank_swap_t __user *) data,
494 sizeof(swap));
495
496 if (swap.seqtype & ~(_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE |
497 _DRM_VBLANK_SECONDARY | _DRM_VBLANK_NEXTONMISS)) {
498 DRM_ERROR("Invalid sequence type 0x%x\n", swap.seqtype);
499 return DRM_ERR(EINVAL);
500 }
501
502 pipe = (swap.seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;
503
504 seqtype = swap.seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE);
505
506 if (!(dev_priv->vblank_pipe & (1 << pipe))) {
507 DRM_ERROR("Invalid pipe %d\n", pipe);
508 return DRM_ERR(EINVAL);
509 }
510
511 spin_lock_irqsave(&dev->drw_lock, irqflags);
512
513 if (!drm_get_drawable_info(dev, swap.drawable)) {
514 spin_unlock_irqrestore(&dev->drw_lock, irqflags);
515 DRM_DEBUG("Invalid drawable ID %d\n", swap.drawable);
516 return DRM_ERR(EINVAL);
517 }
518
519 spin_unlock_irqrestore(&dev->drw_lock, irqflags);
520
521 curseq = atomic_read(pipe ? &dev->vbl_received2 : &dev->vbl_received);
522
523 if (seqtype == _DRM_VBLANK_RELATIVE)
524 swap.sequence += curseq;
525
526 if ((curseq - swap.sequence) <= (1<<23)) {
527 if (swap.seqtype & _DRM_VBLANK_NEXTONMISS) {
528 swap.sequence = curseq + 1;
529 } else {
530 DRM_DEBUG("Missed target sequence\n");
531 return DRM_ERR(EINVAL);
532 }
533 }
534
535 spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
536
537 list_for_each(list, &dev_priv->vbl_swaps.head) {
538 vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head);
539
540 if (vbl_swap->drw_id == swap.drawable &&
541 vbl_swap->pipe == pipe &&
542 vbl_swap->sequence == swap.sequence) {
543 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
544 DRM_DEBUG("Already scheduled\n");
545 return 0;
546 }
547 }
548
549 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
550
551 if (dev_priv->swaps_pending >= 100) {
552 DRM_DEBUG("Too many swaps queued\n");
553 return DRM_ERR(EBUSY);
554 }
555
556 vbl_swap = drm_calloc(1, sizeof(vbl_swap), DRM_MEM_DRIVER);
557
558 if (!vbl_swap) {
559 DRM_ERROR("Failed to allocate memory to queue swap\n");
560 return DRM_ERR(ENOMEM);
561 }
562
563 DRM_DEBUG("\n");
564
565 vbl_swap->drw_id = swap.drawable;
566 vbl_swap->pipe = pipe;
567 vbl_swap->sequence = swap.sequence;
568
569 spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
570
571 list_add_tail((struct list_head *)vbl_swap, &dev_priv->vbl_swaps.head);
572 dev_priv->swaps_pending++;
573
574 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
575
576 DRM_COPY_TO_USER_IOCTL((drm_i915_vblank_swap_t __user *) data, swap,
577 sizeof(swap));
578
579 return 0;
580 }
581
/* drm_dma.h hooks
 */
void i915_driver_irq_preinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	/* Quiesce interrupt state before the handler is installed:
	 * 0xfffe in HWSTAM, and nothing masked or enabled in the
	 * interrupt mask/enable registers. */
	I915_WRITE16(I915REG_HWSTAM, 0xfffe);
	I915_WRITE16(I915REG_INT_MASK_R, 0x0);
	I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
}
592
void i915_driver_irq_postinstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	/* Initialize the swap scheduling state used by the vblank tasklet. */
	spin_lock_init(&dev_priv->swaps_lock);
	INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
	dev_priv->swaps_pending = 0;

	/* Default to vblank interrupts on pipe A if none was configured. */
	if (!dev_priv->vblank_pipe)
		dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;
	i915_enable_interrupt(dev);
	DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
}
606
void i915_driver_irq_uninstall(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u16 temp;

	if (!dev_priv)
		return;

	/* Mask and disable every interrupt source. */
	I915_WRITE16(I915REG_HWSTAM, 0xffff);
	I915_WRITE16(I915REG_INT_MASK_R, 0xffff);
	I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);

	/* Acknowledge anything still pending by writing the pending bits
	 * back (same ack pattern the irq handler uses). */
	temp = I915_READ16(I915REG_INT_IDENTITY_R);
	I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
}
This page took 0.069149 seconds and 5 git commands to generate.