/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "drmP.h"
#include "drm.h"
#include "drm_crtc_helper.h"
#include "drm_fb_helper.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/pci.h>
#include <linux/vgaarb.h>
#include <linux/acpi.h>
#include <linux/pnp.h>
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <acpi/video.h>
/**
 * Sets up the hardware status page for devices that need a physical address
 * in the register.
 */
static int i915_init_phys_hws(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring = LP_RING(dev_priv);

        /* Program Hardware Status Page */
        dev_priv->status_page_dmah =
                drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
        if (!dev_priv->status_page_dmah) {
                DRM_ERROR("Can not allocate hardware status page\n");
                return -ENOMEM;
        }
        ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
        dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;

        memset(ring->status_page.page_addr, 0, PAGE_SIZE);

        if (INTEL_INFO(dev)->gen >= 4)
                dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
                                             0xf0;

        I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
        DRM_DEBUG_DRIVER("Enabled hardware status page\n");
        return 0;
}
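/*
 * Note: this physical-address path is only taken when I915_NEED_GFX_HWS()
 * is false.  Chipsets that do need a GFX hardware status page keep it in
 * graphics memory instead and program it through the I915_HWS_ADDR ioctl
 * handled by i915_set_status_page() further below.
 */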
/**
 * Frees the hardware status page, whether it's a physical address or a virtual
 * address set up by the X Server.
 */
static void i915_free_hws(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_ring_buffer *ring = LP_RING(dev_priv);

        if (dev_priv->status_page_dmah) {
                drm_pci_free(dev, dev_priv->status_page_dmah);
                dev_priv->status_page_dmah = NULL;
        }

        if (ring->status_page.gfx_addr) {
                ring->status_page.gfx_addr = 0;
                drm_core_ioremapfree(&dev_priv->hws_map, dev);
        }

        /* Need to rewrite hardware status page */
        I915_WRITE(HWS_PGA, 0x1ffff000);
}
void i915_kernel_lost_context(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv;
        struct intel_ring_buffer *ring = LP_RING(dev_priv);

        /*
         * We should never lose context on the ring with modesetting
         * as we don't expose it to userspace
         */
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return;

        ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
        ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
        ring->space = ring->head - (ring->tail + 8);
        if (ring->space < 0)
                ring->space += ring->size;

        if (!dev->primary->master)
                return;

        master_priv = dev->primary->master->driver_priv;
        if (ring->head == ring->tail && master_priv->sarea_priv)
                master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}
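/*
 * The space computation above treats the ring as a circular buffer: free
 * space is the gap between the hardware HEAD and the software TAIL, less
 * an 8-byte guard so the two pointers never become equal on a full ring.
 */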
static int i915_dma_cleanup(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i;

        /* Make sure interrupts are disabled here because the uninstall ioctl
         * may not have been called from userspace and after dev_private
         * is freed, it's too late.
         */
        if (dev->irq_enabled)
                drm_irq_uninstall(dev);

        mutex_lock(&dev->struct_mutex);
        for (i = 0; i < I915_NUM_RINGS; i++)
                intel_cleanup_ring_buffer(&dev_priv->ring[i]);
        mutex_unlock(&dev->struct_mutex);

        /* Clear the HWS virtual address at teardown */
        if (I915_NEED_GFX_HWS(dev))
                i915_free_hws(dev);

        return 0;
}
static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
        int ret;

        master_priv->sarea = drm_getsarea(dev);
        if (master_priv->sarea) {
                master_priv->sarea_priv = (drm_i915_sarea_t *)
                        ((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
        } else {
                DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
        }

        if (init->ring_size != 0) {
                if (LP_RING(dev_priv)->obj != NULL) {
                        i915_dma_cleanup(dev);
                        DRM_ERROR("Client tried to initialize ringbuffer in "
                                  "GEM mode\n");
                        return -EINVAL;
                }

                ret = intel_render_ring_init_dri(dev,
                                                 init->ring_start,
                                                 init->ring_size);
                if (ret) {
                        i915_dma_cleanup(dev);
                        return ret;
                }
        }

        dev_priv->cpp = init->cpp;
        dev_priv->back_offset = init->back_offset;
        dev_priv->front_offset = init->front_offset;
        dev_priv->current_page = 0;
        if (master_priv->sarea_priv)
                master_priv->sarea_priv->pf_current_page = 0;

        /* Allow hardware batchbuffers unless told otherwise.
         */
        dev_priv->allow_batchbuffer = 1;

        return 0;
}
static int i915_dma_resume(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        struct intel_ring_buffer *ring = LP_RING(dev_priv);

        DRM_DEBUG_DRIVER("%s\n", __func__);

        if (ring->map.handle == NULL) {
                DRM_ERROR("can not ioremap virtual address for"
                          " ring buffer\n");
                return -ENOMEM;
        }

        /* Program Hardware Status Page */
        if (!ring->status_page.page_addr) {
                DRM_ERROR("Can not find hardware status page\n");
                return -EINVAL;
        }
        DRM_DEBUG_DRIVER("hw status page @ %p\n",
                         ring->status_page.page_addr);
        if (ring->status_page.gfx_addr != 0)
                intel_ring_setup_status_page(ring);
        else
                I915_WRITE(HWS_PGA, dev_priv->dma_status_page);

        DRM_DEBUG_DRIVER("Enabled hardware status page\n");

        return 0;
}
static int i915_dma_init(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        drm_i915_init_t *init = data;
        int retcode = 0;

        switch (init->func) {
        case I915_INIT_DMA:
                retcode = i915_initialize(dev, init);
                break;
        case I915_CLEANUP_DMA:
                retcode = i915_dma_cleanup(dev);
                break;
        case I915_RESUME_DMA:
                retcode = i915_dma_resume(dev);
                break;
        default:
                retcode = -EINVAL;
                break;
        }

        return retcode;
}
/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
static int validate_cmd(int cmd)
{
        switch (((cmd >> 29) & 0x7)) {
        case 0x0:
                switch ((cmd >> 23) & 0x3f) {
                case 0x0:
                        return 1;       /* MI_NOOP */
                case 0x04:
                        return 1;       /* MI_FLUSH */
                default:
                        return 0;       /* disallow everything else */
                }
                break;
        case 0x1:
                return 0;       /* reserved */
        case 0x2:
                return (cmd & 0xff) + 2;        /* 2d commands */
        case 0x3:
                if (((cmd >> 24) & 0x1f) <= 0x18)
                        return 1;

                switch ((cmd >> 24) & 0x1f) {
                case 0x1c:
                        return 1;
                case 0x1d:
                        switch ((cmd >> 16) & 0xff) {
                        case 0x3:
                                return (cmd & 0x1f) + 2;
                        case 0x4:
                                return (cmd & 0xf) + 2;
                        default:
                                return (cmd & 0xffff) + 2;
                        }
                case 0x1e:
                        if (cmd & (1 << 23))
                                return (cmd & 0xffff) + 1;
                        else
                                return 1;
                case 0x1f:
                        if ((cmd & (1 << 23)) == 0)     /* inline vertices */
                                return (cmd & 0x1ffff) + 2;
                        else if (cmd & (1 << 17))       /* indirect random */
                                if ((cmd & 0xffff) == 0)
                                        return 0;       /* unknown length, too hard */
                                else
                                        return (((cmd & 0xffff) + 1) / 2) + 1;
                        else
                                return 2;       /* indirect sequential */
                default:
                        return 0;
                }
        default:
                return 0;
        }

        return 0;
}
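/*
 * Worked example (illustrative): for a 2D command such as 0x40000006 the
 * top three bits select the 0x2 case above and the low byte (0x06) gives
 * the payload length, so validate_cmd() reports 6 + 2 = 8 dwords and the
 * scan in i915_emit_cmds() resumes eight dwords later.
 */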
static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int i, ret;

        if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
                return -EINVAL;

        for (i = 0; i < dwords;) {
                int sz = validate_cmd(buffer[i]);
                if (sz == 0 || i + sz > dwords)
                        return -EINVAL;
                i += sz;
        }

        ret = BEGIN_LP_RING((dwords+1)&~1);
        if (ret)
                return ret;

        for (i = 0; i < dwords; i++)
                OUT_RING(buffer[i]);
        if (dwords & 1)
                OUT_RING(0);

        ADVANCE_LP_RING();

        return 0;
}
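/*
 * The ring is advanced in quadword units, so BEGIN_LP_RING() is asked for
 * (dwords + 1) & ~1 entries and an odd-length buffer is padded with one
 * trailing zero dword before ADVANCE_LP_RING().
 */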
int
i915_emit_box(struct drm_device *dev,
              struct drm_clip_rect *box,
              int DR1, int DR4)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
            box->y2 <= 0 || box->x2 <= 0) {
                DRM_ERROR("Bad box %d,%d..%d,%d\n",
                          box->x1, box->y1, box->x2, box->y2);
                return -EINVAL;
        }

        if (INTEL_INFO(dev)->gen >= 4) {
                ret = BEGIN_LP_RING(4);
                if (ret)
                        return ret;

                OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
                OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
                OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
                OUT_RING(DR4);
        } else {
                ret = BEGIN_LP_RING(6);
                if (ret)
                        return ret;

                OUT_RING(GFX_OP_DRAWRECT_INFO);
                OUT_RING(DR1);
                OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
                OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
                OUT_RING(DR4);
                OUT_RING(0);
        }
        ADVANCE_LP_RING();

        return 0;
}
/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit. For now, do it in both places:
 */

static void i915_emit_breadcrumb(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

        dev_priv->counter++;
        if (dev_priv->counter > 0x7FFFFFFFUL)
                dev_priv->counter = 0;
        if (master_priv->sarea_priv)
                master_priv->sarea_priv->last_enqueue = dev_priv->counter;

        if (BEGIN_LP_RING(4) == 0) {
                OUT_RING(MI_STORE_DWORD_INDEX);
                OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
                OUT_RING(dev_priv->counter);
                OUT_RING(0);
                ADVANCE_LP_RING();
        }
}
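/*
 * MI_STORE_DWORD_INDEX makes the GPU write dev_priv->counter into the
 * hardware status page at I915_BREADCRUMB_INDEX; READ_BREADCRUMB() later
 * reads that slot back to tell how far the GPU has progressed.
 */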
static int i915_dispatch_cmdbuffer(struct drm_device * dev,
                                   drm_i915_cmdbuffer_t * cmd,
                                   struct drm_clip_rect *cliprects,
                                   void *cmdbuf)
{
        int nbox = cmd->num_cliprects;
        int i = 0, count, ret;

        if (cmd->sz & 0x3) {
                DRM_ERROR("alignment");
                return -EINVAL;
        }

        i915_kernel_lost_context(dev);

        count = nbox ? nbox : 1;

        for (i = 0; i < count; i++) {
                if (i < nbox) {
                        ret = i915_emit_box(dev, &cliprects[i],
                                            cmd->DR1, cmd->DR4);
                        if (ret)
                                return ret;
                }

                ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
                if (ret)
                        return ret;
        }

        i915_emit_breadcrumb(dev);
        return 0;
}
static int i915_dispatch_batchbuffer(struct drm_device * dev,
                                     drm_i915_batchbuffer_t * batch,
                                     struct drm_clip_rect *cliprects)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int nbox = batch->num_cliprects;
        int i, count, ret;

        if ((batch->start | batch->used) & 0x7) {
                DRM_ERROR("alignment");
                return -EINVAL;
        }

        i915_kernel_lost_context(dev);

        count = nbox ? nbox : 1;
        for (i = 0; i < count; i++) {
                if (i < nbox) {
                        ret = i915_emit_box(dev, &cliprects[i],
                                            batch->DR1, batch->DR4);
                        if (ret)
                                return ret;
                }

                if (!IS_I830(dev) && !IS_845G(dev)) {
                        ret = BEGIN_LP_RING(2);
                        if (ret)
                                return ret;

                        if (INTEL_INFO(dev)->gen >= 4) {
                                OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
                                OUT_RING(batch->start);
                        } else {
                                OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
                                OUT_RING(batch->start | MI_BATCH_NON_SECURE);
                        }
                } else {
                        ret = BEGIN_LP_RING(4);
                        if (ret)
                                return ret;

                        OUT_RING(MI_BATCH_BUFFER);
                        OUT_RING(batch->start | MI_BATCH_NON_SECURE);
                        OUT_RING(batch->start + batch->used - 4);
                        OUT_RING(0);
                }
                ADVANCE_LP_RING();
        }

        if (IS_G4X(dev) || IS_GEN5(dev)) {
                if (BEGIN_LP_RING(2) == 0) {
                        OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
                        OUT_RING(MI_NOOP);
                        ADVANCE_LP_RING();
                }
        }

        i915_emit_breadcrumb(dev);
        return 0;
}
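/*
 * 830/845G cannot use MI_BATCH_BUFFER_START, so the else branch above
 * emits MI_BATCH_BUFFER with explicit start and end addresses, the end
 * being computed as batch->start + batch->used - 4, i.e. the address of
 * the batch's last dword.
 */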
static int i915_dispatch_flip(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_master_private *master_priv =
                dev->primary->master->driver_priv;
        int ret;

        if (!master_priv->sarea_priv)
                return -EINVAL;

        DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
                         __func__,
                         dev_priv->current_page,
                         master_priv->sarea_priv->pf_current_page);

        i915_kernel_lost_context(dev);

        ret = BEGIN_LP_RING(10);
        if (ret)
                return ret;

        OUT_RING(MI_FLUSH | MI_READ_FLUSH);
        OUT_RING(0);

        OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
        OUT_RING(0);
        if (dev_priv->current_page == 0) {
                OUT_RING(dev_priv->back_offset);
                dev_priv->current_page = 1;
        } else {
                OUT_RING(dev_priv->front_offset);
                dev_priv->current_page = 0;
        }
        OUT_RING(0);

        OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
        OUT_RING(0);

        ADVANCE_LP_RING();

        master_priv->sarea_priv->last_enqueue = dev_priv->counter++;

        if (BEGIN_LP_RING(4) == 0) {
                OUT_RING(MI_STORE_DWORD_INDEX);
                OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
                OUT_RING(dev_priv->counter);
                OUT_RING(0);
                ADVANCE_LP_RING();
        }

        master_priv->sarea_priv->pf_current_page = dev_priv->current_page;
        return 0;
}
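/*
 * The DRI1 flip alternates scanout between back_offset and front_offset,
 * waits for the plane A flip event, and stores a breadcrumb so userspace
 * can track the flip through the sarea's pf_current_page/last_enqueue.
 */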
static int i915_quiescent(struct drm_device *dev)
{
        struct intel_ring_buffer *ring = LP_RING(dev->dev_private);

        i915_kernel_lost_context(dev);
        return intel_wait_ring_buffer(ring, ring->size - 8);
}
static int i915_flush_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
        int ret;

        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

        mutex_lock(&dev->struct_mutex);
        ret = i915_quiescent(dev);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}
static int i915_batchbuffer(struct drm_device *dev, void *data,
                            struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
        drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
            master_priv->sarea_priv;
        drm_i915_batchbuffer_t *batch = data;
        int ret;
        struct drm_clip_rect *cliprects = NULL;

        if (!dev_priv->allow_batchbuffer) {
                DRM_ERROR("Batchbuffer ioctl disabled\n");
                return -EINVAL;
        }

        DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
                         batch->start, batch->used, batch->num_cliprects);

        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

        if (batch->num_cliprects < 0)
                return -EINVAL;

        if (batch->num_cliprects) {
                cliprects = kcalloc(batch->num_cliprects,
                                    sizeof(struct drm_clip_rect),
                                    GFP_KERNEL);
                if (cliprects == NULL)
                        return -ENOMEM;

                ret = copy_from_user(cliprects, batch->cliprects,
                                     batch->num_cliprects *
                                     sizeof(struct drm_clip_rect));
                if (ret != 0) {
                        ret = -EFAULT;
                        goto fail_free;
                }
        }

        mutex_lock(&dev->struct_mutex);
        ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
        mutex_unlock(&dev->struct_mutex);

        if (sarea_priv)
                sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_free:
        kfree(cliprects);

        return ret;
}
static int i915_cmdbuffer(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
        drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
            master_priv->sarea_priv;
        drm_i915_cmdbuffer_t *cmdbuf = data;
        struct drm_clip_rect *cliprects = NULL;
        void *batch_data;
        int ret;

        DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
                         cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);

        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

        if (cmdbuf->num_cliprects < 0)
                return -EINVAL;

        batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL);
        if (batch_data == NULL)
                return -ENOMEM;

        ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
        if (ret != 0) {
                ret = -EFAULT;
                goto fail_batch_free;
        }

        if (cmdbuf->num_cliprects) {
                cliprects = kcalloc(cmdbuf->num_cliprects,
                                    sizeof(struct drm_clip_rect), GFP_KERNEL);
                if (cliprects == NULL) {
                        ret = -ENOMEM;
                        goto fail_batch_free;
                }

                ret = copy_from_user(cliprects, cmdbuf->cliprects,
                                     cmdbuf->num_cliprects *
                                     sizeof(struct drm_clip_rect));
                if (ret != 0) {
                        ret = -EFAULT;
                        goto fail_clip_free;
                }
        }

        mutex_lock(&dev->struct_mutex);
        ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
        mutex_unlock(&dev->struct_mutex);
        if (ret) {
                DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
                goto fail_clip_free;
        }

        if (sarea_priv)
                sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_clip_free:
        kfree(cliprects);
fail_batch_free:
        kfree(batch_data);

        return ret;
}
static int i915_flip_bufs(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
        int ret;

        DRM_DEBUG_DRIVER("%s\n", __func__);

        RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

        mutex_lock(&dev->struct_mutex);
        ret = i915_dispatch_flip(dev);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}
static int i915_getparam(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_getparam_t *param = data;
        int value;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        switch (param->param) {
        case I915_PARAM_IRQ_ACTIVE:
                value = dev->pdev->irq ? 1 : 0;
                break;
        case I915_PARAM_ALLOW_BATCHBUFFER:
                value = dev_priv->allow_batchbuffer ? 1 : 0;
                break;
        case I915_PARAM_LAST_DISPATCH:
                value = READ_BREADCRUMB(dev_priv);
                break;
        case I915_PARAM_CHIPSET_ID:
                value = dev->pci_device;
                break;
        case I915_PARAM_HAS_GEM:
                value = dev_priv->has_gem;
                break;
        case I915_PARAM_NUM_FENCES_AVAIL:
                value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
                break;
        case I915_PARAM_HAS_OVERLAY:
                value = dev_priv->overlay ? 1 : 0;
                break;
        case I915_PARAM_HAS_PAGEFLIPPING:
                value = 1;
                break;
        case I915_PARAM_HAS_EXECBUF2:
                /* depends on GEM */
                value = dev_priv->has_gem;
                break;
        case I915_PARAM_HAS_BSD:
                value = HAS_BSD(dev);
                break;
        case I915_PARAM_HAS_BLT:
                value = HAS_BLT(dev);
                break;
        case I915_PARAM_HAS_RELAXED_FENCING:
                value = 1;
                break;
        case I915_PARAM_HAS_COHERENT_RINGS:
                value = 1;
                break;
        case I915_PARAM_HAS_EXEC_CONSTANTS:
                value = INTEL_INFO(dev)->gen >= 4;
                break;
        default:
                DRM_DEBUG_DRIVER("Unknown parameter %d\n",
                                 param->param);
                return -EINVAL;
        }

        if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
                DRM_ERROR("DRM_COPY_TO_USER failed\n");
                return -EFAULT;
        }

        return 0;
}
static int i915_setparam(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_setparam_t *param = data;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        switch (param->param) {
        case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
                break;
        case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
                dev_priv->tex_lru_log_granularity = param->value;
                break;
        case I915_SETPARAM_ALLOW_BATCHBUFFER:
                dev_priv->allow_batchbuffer = param->value;
                break;
        case I915_SETPARAM_NUM_USED_FENCES:
                if (param->value > dev_priv->num_fence_regs ||
                    param->value < 0)
                        return -EINVAL;
                /* Userspace can use first N regs */
                dev_priv->fence_reg_start = param->value;
                break;
        default:
                DRM_DEBUG_DRIVER("unknown parameter %d\n",
                                 param->param);
                return -EINVAL;
        }

        return 0;
}
static int i915_set_status_page(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        drm_i915_hws_addr_t *hws = data;
        struct intel_ring_buffer *ring = LP_RING(dev_priv);

        if (!I915_NEED_GFX_HWS(dev))
                return -EINVAL;

        if (!dev_priv) {
                DRM_ERROR("called with no initialization\n");
                return -EINVAL;
        }

        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                WARN(1, "tried to set status page when mode setting active\n");
                return 0;
        }

        DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);

        ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);

        dev_priv->hws_map.offset = dev->agp->base + hws->addr;
        dev_priv->hws_map.size = 4*1024;
        dev_priv->hws_map.type = 0;
        dev_priv->hws_map.flags = 0;
        dev_priv->hws_map.mtrr = 0;

        drm_core_ioremap_wc(&dev_priv->hws_map, dev);
        if (dev_priv->hws_map.handle == NULL) {
                i915_dma_cleanup(dev);
                ring->status_page.gfx_addr = 0;
                DRM_ERROR("can not ioremap virtual address for"
                          " G33 hw status page\n");
                return -ENOMEM;
        }

        ring->status_page.page_addr = dev_priv->hws_map.handle;
        memset(ring->status_page.page_addr, 0, PAGE_SIZE);
        I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);

        DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
                         ring->status_page.gfx_addr);
        DRM_DEBUG_DRIVER("load hws at %p\n",
                         ring->status_page.page_addr);
        return 0;
}
static int i915_get_bridge_dev(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
        if (!dev_priv->bridge_dev) {
                DRM_ERROR("bridge device not found\n");
                return -1;
        }
        return 0;
}

#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)

#define DEVEN_REG 0x54
#define DEVEN_MCHBAR_EN (1 << 28)
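/*
 * MCHBAR_I915/MCHBAR_I965 are the PCI config offsets of the MCH base
 * address register in the host bridge (device 0); on 915G/915GM the enable
 * bit lives in DEVEN_REG rather than in bit 0 of the BAR itself, which is
 * why the setup/teardown helpers below special-case those chipsets.
 */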
/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
        u32 temp_lo, temp_hi = 0;
        u64 mchbar_addr;
        int ret;

        if (INTEL_INFO(dev)->gen >= 4)
                pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
        pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
        mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

        /* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
        if (mchbar_addr &&
            pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
                return 0;
#endif

        /* Get some space for it */
        dev_priv->mch_res.name = "i915 MCHBAR";
        dev_priv->mch_res.flags = IORESOURCE_MEM;
        ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
                                     &dev_priv->mch_res,
                                     MCHBAR_SIZE, MCHBAR_SIZE,
                                     PCIBIOS_MIN_MEM,
                                     0, pcibios_align_resource,
                                     dev_priv->bridge_dev);
        if (ret) {
                DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
                dev_priv->mch_res.start = 0;
                return ret;
        }

        if (INTEL_INFO(dev)->gen >= 4)
                pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
                                       upper_32_bits(dev_priv->mch_res.start));

        pci_write_config_dword(dev_priv->bridge_dev, reg,
                               lower_32_bits(dev_priv->mch_res.start));
        return 0;
}
/* Setup MCHBAR if possible, return true if we should disable it again */
static void
intel_setup_mchbar(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
        u32 temp;
        bool enabled;

        dev_priv->mchbar_need_disable = false;

        if (IS_I915G(dev) || IS_I915GM(dev)) {
                pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
                enabled = !!(temp & DEVEN_MCHBAR_EN);
        } else {
                pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
                enabled = temp & 1;
        }

        /* If it's already enabled, don't have to do anything */
        if (enabled)
                return;

        if (intel_alloc_mchbar_resource(dev))
                return;

        dev_priv->mchbar_need_disable = true;

        /* Space is allocated or reserved, so enable it. */
        if (IS_I915G(dev) || IS_I915GM(dev)) {
                pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
                                       temp | DEVEN_MCHBAR_EN);
        } else {
                pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
                pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
        }
}
static void
intel_teardown_mchbar(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
        u32 temp;

        if (dev_priv->mchbar_need_disable) {
                if (IS_I915G(dev) || IS_I915GM(dev)) {
                        pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
                        temp &= ~DEVEN_MCHBAR_EN;
                        pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
                } else {
                        pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
                        temp &= ~1;
                        pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
                }
        }

        if (dev_priv->mch_res.start)
                release_resource(&dev_priv->mch_res);
}
#define PTE_ADDRESS_MASK                0xfffff000
#define PTE_ADDRESS_MASK_HIGH           0x000000f0 /* i915+ */
#define PTE_MAPPING_TYPE_UNCACHED       (0 << 1)
#define PTE_MAPPING_TYPE_DCACHE         (1 << 1) /* i830 only */
#define PTE_MAPPING_TYPE_CACHED         (3 << 1)
#define PTE_MAPPING_TYPE_MASK           (3 << 1)
#define PTE_VALID                       (1 << 0)
/**
 * i915_stolen_to_phys - take an offset into stolen memory and turn it into
 *                       a physical one
 * @dev: drm device
 * @offset: address to translate
 *
 * Some chip functions require allocations from stolen space and need the
 * physical address of the memory in question.
 */
static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct pci_dev *pdev = dev_priv->bridge_dev;
        u32 base;

#if 0
        /* On the machines I have tested the Graphics Base of Stolen Memory
         * is unreliable, so compute the base by subtracting the stolen memory
         * from the Top of Low Usable DRAM which is where the BIOS places
         * the graphics stolen memory.
         */
        if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
                /* top 32bits are reserved = 0 */
                pci_read_config_dword(pdev, 0xA4, &base);
        } else {
                /* XXX presume 8xx is the same as i915 */
                pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
        }
#else
        if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
                u16 val;
                pci_read_config_word(pdev, 0xb0, &val);
                base = val >> 4 << 20;
        } else {
                u8 val;
                pci_read_config_byte(pdev, 0x9c, &val);
                base = val >> 3 << 27;
        }
        base -= dev_priv->mm.gtt->stolen_size;
#endif

        return base + offset;
}
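/*
 * Example of the decoding above: on G33 and newer the Top of Low Usable
 * DRAM is read from config word 0xb0, bits 15:4 in 1 MB units
 * (val >> 4 << 20), and the stolen size is then subtracted to recover the
 * base of graphics stolen memory.
 */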
static void i915_warn_stolen(struct drm_device *dev)
{
        DRM_ERROR("not enough stolen space for compressed buffer, disabling\n");
        DRM_ERROR("hint: you may be able to increase stolen memory size in the BIOS to avoid this\n");
}
static void i915_setup_compression(struct drm_device *dev, int size)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
        unsigned long cfb_base;
        unsigned long ll_base = 0;

        compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
        if (compressed_fb)
                compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
        if (!compressed_fb)
                goto err;

        cfb_base = i915_stolen_to_phys(dev, compressed_fb->start);
        if (!cfb_base)
                goto err_fb;

        if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) {
                compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
                                                    4096, 4096, 0);
                if (compressed_llb)
                        compressed_llb = drm_mm_get_block(compressed_llb,
                                                          4096, 4096);
                if (!compressed_llb)
                        goto err_fb;

                ll_base = i915_stolen_to_phys(dev, compressed_llb->start);
                if (!ll_base)
                        goto err_llb;
        }

        dev_priv->cfb_size = size;

        intel_disable_fbc(dev);
        dev_priv->compressed_fb = compressed_fb;
        if (HAS_PCH_SPLIT(dev))
                I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
        else if (IS_GM45(dev)) {
                I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
        } else {
                I915_WRITE(FBC_CFB_BASE, cfb_base);
                I915_WRITE(FBC_LL_BASE, ll_base);
                dev_priv->compressed_llb = compressed_llb;
        }

        DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n",
                      cfb_base, ll_base, size >> 20);
        return;

err_llb:
        drm_mm_put_block(compressed_llb);
err_fb:
        drm_mm_put_block(compressed_fb);
err:
        dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
        i915_warn_stolen(dev);
}
static void i915_cleanup_compression(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;

        drm_mm_put_block(dev_priv->compressed_fb);
        if (dev_priv->compressed_llb)
                drm_mm_put_block(dev_priv->compressed_llb);
}
/* true = enable decode, false = disable decoder */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
        struct drm_device *dev = cookie;

        intel_modeset_vga_set_state(dev, state);
        if (state)
                return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
                       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
        else
                return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
        struct drm_device *dev = pci_get_drvdata(pdev);
        pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
        if (state == VGA_SWITCHEROO_ON) {
                printk(KERN_INFO "i915: switched on\n");
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
                /* i915 resume handler doesn't set to D0 */
                pci_set_power_state(dev->pdev, PCI_D0);
                i915_resume(dev);
                dev->switch_power_state = DRM_SWITCH_POWER_ON;
        } else {
                printk(KERN_ERR "i915: switched off\n");
                dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
                i915_suspend(dev, pmm);
                dev->switch_power_state = DRM_SWITCH_POWER_OFF;
        }
}
static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
        struct drm_device *dev = pci_get_drvdata(pdev);
        bool can_switch;

        spin_lock(&dev->count_lock);
        can_switch = (dev->open_count == 0);
        spin_unlock(&dev->count_lock);
        return can_switch;
}
static int i915_load_modeset_init(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long prealloc_size, gtt_size, mappable_size;
        int ret = 0;

        prealloc_size = dev_priv->mm.gtt->stolen_size;
        gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
        mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;

        /* Basic memrange allocator for stolen space */
        drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);

        /* Let GEM Manage all of the aperture.
         *
         * However, leave one page at the end still bound to the scratch page.
         * There are a number of places where the hardware apparently
         * prefetches past the end of the object, and we've seen multiple
         * hangs with the GPU head pointer stuck in a batchbuffer bound
         * at the last page of the aperture.  One page should be enough to
         * keep any prefetching inside of the aperture.
         */
        i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);

        mutex_lock(&dev->struct_mutex);
        ret = i915_gem_init_ringbuffer(dev);
        mutex_unlock(&dev->struct_mutex);
        if (ret)
                return ret;

        /* Try to set up FBC with a reasonable compressed buffer size */
        if (I915_HAS_FBC(dev) && i915_powersave) {
                int cfb_size;

                /* Leave 1M for line length buffer & misc. */

                /* Try to get a 32M buffer... */
                if (prealloc_size > (36*1024*1024))
                        cfb_size = 32*1024*1024;
                else /* fall back to 7/8 of the stolen space */
                        cfb_size = prealloc_size * 7 / 8;
                i915_setup_compression(dev, cfb_size);
        }

        /* Allow hardware batchbuffers unless told otherwise. */
        dev_priv->allow_batchbuffer = 1;

        ret = intel_parse_bios(dev);
        if (ret)
                DRM_INFO("failed to find VBIOS tables\n");

        /* if we have > 1 VGA cards, then disable the radeon VGA resources */
        ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
        if (ret)
                goto cleanup_ringbuffer;

        intel_register_dsm_handler();

        ret = vga_switcheroo_register_client(dev->pdev,
                                             i915_switcheroo_set_state,
                                             NULL,
                                             i915_switcheroo_can_switch);
        if (ret)
                goto cleanup_vga_client;

        /* IIR "flip pending" bit means done if this bit is set */
        if (IS_GEN3(dev) && (I915_READ(ECOSKPD) & ECO_FLIP_DONE))
                dev_priv->flip_pending_is_done = true;

        intel_modeset_init(dev);

        ret = drm_irq_install(dev);
        if (ret)
                goto cleanup_vga_switcheroo;

        /* Always safe in the mode setting case. */
        /* FIXME: do pre/post-mode set stuff in core KMS code */
        dev->vblank_disable_allowed = 1;

        ret = intel_fbdev_init(dev);
        if (ret)
                goto cleanup_irq;

        drm_kms_helper_poll_init(dev);

        /* We're off and running w/KMS */
        dev_priv->mm.suspended = 0;

        return 0;

cleanup_irq:
        drm_irq_uninstall(dev);
cleanup_vga_switcheroo:
        vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
        vga_client_register(dev->pdev, NULL, NULL, NULL);
cleanup_ringbuffer:
        mutex_lock(&dev->struct_mutex);
        i915_gem_cleanup_ringbuffer(dev);
        mutex_unlock(&dev->struct_mutex);
        return ret;
}
int i915_master_create(struct drm_device *dev, struct drm_master *master)
{
        struct drm_i915_master_private *master_priv;

        master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL);
        if (!master_priv)
                return -ENOMEM;

        master->driver_priv = master_priv;
        return 0;
}

void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
{
        struct drm_i915_master_private *master_priv = master->driver_priv;

        if (!master_priv)
                return;

        kfree(master_priv);

        master->driver_priv = NULL;
}
static void i915_pineview_get_mem_freq(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 tmp;

        tmp = I915_READ(CLKCFG);

        switch (tmp & CLKCFG_FSB_MASK) {
        case CLKCFG_FSB_533:
                dev_priv->fsb_freq = 533; /* 133*4 */
                break;
        case CLKCFG_FSB_800:
                dev_priv->fsb_freq = 800; /* 200*4 */
                break;
        case CLKCFG_FSB_667:
                dev_priv->fsb_freq = 667; /* 167*4 */
                break;
        case CLKCFG_FSB_400:
                dev_priv->fsb_freq = 400; /* 100*4 */
                break;
        }

        switch (tmp & CLKCFG_MEM_MASK) {
        case CLKCFG_MEM_533:
                dev_priv->mem_freq = 533;
                break;
        case CLKCFG_MEM_667:
                dev_priv->mem_freq = 667;
                break;
        case CLKCFG_MEM_800:
                dev_priv->mem_freq = 800;
                break;
        }

        /* detect pineview DDR3 setting */
        tmp = I915_READ(CSHRDDR3CTL);
        dev_priv->is_ddr3 = (tmp & CSHRDDR3CTL_DDR3) ? 1 : 0;
}
static void i915_ironlake_get_mem_freq(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        u16 ddrpll, csipll;

        ddrpll = I915_READ16(DDRMPLL1);
        csipll = I915_READ16(CSIPLL0);

        switch (ddrpll & 0xff) {
        case 0xc:
                dev_priv->mem_freq = 800;
                break;
        case 0x10:
                dev_priv->mem_freq = 1066;
                break;
        case 0x14:
                dev_priv->mem_freq = 1333;
                break;
        case 0x18:
                dev_priv->mem_freq = 1600;
                break;
        default:
                DRM_DEBUG_DRIVER("unknown memory frequency 0x%02x\n",
                                 ddrpll & 0xff);
                dev_priv->mem_freq = 0;
                break;
        }

        dev_priv->r_t = dev_priv->mem_freq;

        switch (csipll & 0x3ff) {
        case 0x00c:
                dev_priv->fsb_freq = 3200;
                break;
        case 0x00e:
                dev_priv->fsb_freq = 3733;
                break;
        case 0x010:
                dev_priv->fsb_freq = 4266;
                break;
        case 0x012:
                dev_priv->fsb_freq = 4800;
                break;
        case 0x014:
                dev_priv->fsb_freq = 5333;
                break;
        case 0x016:
                dev_priv->fsb_freq = 5866;
                break;
        case 0x018:
                dev_priv->fsb_freq = 6400;
                break;
        default:
                DRM_DEBUG_DRIVER("unknown fsb frequency 0x%04x\n",
                                 csipll & 0x3ff);
                dev_priv->fsb_freq = 0;
                break;
        }

        if (dev_priv->fsb_freq == 3200) {
                dev_priv->c_m = 0;
        } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) {
                dev_priv->c_m = 1;
        } else {
                dev_priv->c_m = 2;
        }
}
static const struct cparams {
        u16 i;
        u16 t;
        u16 m;
        u16 c;
} cparams[] = {
        { 1, 1333, 301, 28664 },
        { 1, 1066, 294, 24460 },
        { 1, 800, 294, 25192 },
        { 0, 1333, 276, 27605 },
        { 0, 1066, 276, 27605 },
        { 0, 800, 231, 23784 },
};
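/*
 * Each row is (i, t, m, c): i915_chipset_val() below selects m and c by
 * matching dev_priv->c_m and dev_priv->r_t (derived from the FSB and
 * memory frequencies detected above) and then estimates chipset power as
 * (m * count_delta / msecs + c) / 10.
 */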
unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
{
        u64 total_count, diff, ret;
        u32 count1, count2, count3, m = 0, c = 0;
        unsigned long now = jiffies_to_msecs(jiffies), diff1;
        int i;

        diff1 = now - dev_priv->last_time1;

        count1 = I915_READ(DMIEC);
        count2 = I915_READ(DDREC);
        count3 = I915_READ(CSIEC);

        total_count = count1 + count2 + count3;

        /* FIXME: handle per-counter overflow */
        if (total_count < dev_priv->last_count1) {
                diff = ~0UL - dev_priv->last_count1;
                diff += total_count;
        } else {
                diff = total_count - dev_priv->last_count1;
        }

        for (i = 0; i < ARRAY_SIZE(cparams); i++) {
                if (cparams[i].i == dev_priv->c_m &&
                    cparams[i].t == dev_priv->r_t) {
                        m = cparams[i].m;
                        c = cparams[i].c;
                        break;
                }
        }

        diff = div_u64(diff, diff1);
        ret = ((m * diff) + c);
        ret = div_u64(ret, 10);

        dev_priv->last_count1 = total_count;
        dev_priv->last_time1 = now;

        return ret;
}
unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
{
        unsigned long m, x, b;
        u32 tsfs;

        tsfs = I915_READ(TSFS);

        m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT);
        x = I915_READ8(TR1);

        b = tsfs & TSFS_INTR_MASK;

        return ((m * x) / 127) - b;
}
static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
{
        static const struct v_table {
                u16 vd; /* in .1 mil */
                u16 vm; /* in .1 mil */
        } v_table[] = {
                /* PXVID -> voltage lookup entries elided here */
        };

        if (dev_priv->info->is_mobile)
                return v_table[pxvid].vm;
        else
                return v_table[pxvid].vd;
}
void i915_update_gfx_val(struct drm_i915_private *dev_priv)
{
        struct timespec now, diff1;
        u64 diff;
        unsigned long diffms;
        u32 count;

        getrawmonotonic(&now);
        diff1 = timespec_sub(now, dev_priv->last_time2);

        /* Don't divide by 0 */
        diffms = diff1.tv_sec * 1000 + diff1.tv_nsec / 1000000;
        if (!diffms)
                return;

        count = I915_READ(GFXEC);

        if (count < dev_priv->last_count2) {
                diff = ~0UL - dev_priv->last_count2;
                diff += count;
        } else {
                diff = count - dev_priv->last_count2;
        }

        dev_priv->last_count2 = count;
        dev_priv->last_time2 = now;

        /* More magic constants... */
        diff = diff * 1181;
        diff = div_u64(diff, diffms * 10);
        dev_priv->gfx_power = diff;
}
unsigned long i915_gfx_val(struct drm_i915_private *dev_priv)
{
        unsigned long t, corr, state1, corr2, state2;
        u32 pxvid, ext_v;

        pxvid = I915_READ(PXVFREQ_BASE + (dev_priv->cur_delay * 4));
        pxvid = (pxvid >> 24) & 0x7f;
        ext_v = pvid_to_extvid(dev_priv, pxvid);

        state1 = ext_v;

        t = i915_mch_val(dev_priv);

        /* Revel in the empirically derived constants */

        /* Correction factor in 1/100000 units */
        if (t > 80)
                corr = ((t * 2349) + 135940);
        else if (t >= 50)
                corr = ((t * 964) + 29317);
        else /* < 50 */
                corr = ((t * 301) + 1004);

        corr = corr * ((150142 * state1) / 10000 - 78642);
        corr /= 100000;
        corr2 = (corr * dev_priv->corr);

        state2 = (corr2 * state1) / 10000;
        state2 /= 100; /* convert to mW */

        i915_update_gfx_val(dev_priv);

        return dev_priv->gfx_power + state2;
}
/* Global for IPS driver to get at the current i915 device */
static struct drm_i915_private *i915_mch_dev;
/*
 * Lock protecting IPS related data structures
 *   - i915_mch_dev
 *   - dev_priv->max_delay
 *   - dev_priv->min_delay
 *   - dev_priv->fmax
 *   - dev_priv->gpu_busy
 */
static DEFINE_SPINLOCK(mchdev_lock);
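/*
 * i915_mch_dev is published at the end of i915_driver_load() and cleared
 * in i915_driver_unload(); the exported i915_gpu_*() helpers below take
 * mchdev_lock and bail out whenever it is NULL, i.e. when intel_ips calls
 * in while no i915 device is bound.
 */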
/**
 * i915_read_mch_val - return value for IPS use
 *
 * Calculate and return a value for the IPS driver to use when deciding whether
 * we have thermal and power headroom to increase CPU or GPU power budget.
 */
unsigned long i915_read_mch_val(void)
{
        struct drm_i915_private *dev_priv;
        unsigned long chipset_val, graphics_val, ret = 0;

        spin_lock(&mchdev_lock);
        if (!i915_mch_dev)
                goto out_unlock;
        dev_priv = i915_mch_dev;

        chipset_val = i915_chipset_val(dev_priv);
        graphics_val = i915_gfx_val(dev_priv);

        ret = chipset_val + graphics_val;

out_unlock:
        spin_unlock(&mchdev_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(i915_read_mch_val);
/**
 * i915_gpu_raise - raise GPU frequency limit
 *
 * Raise the limit; IPS indicates we have thermal headroom.
 */
bool i915_gpu_raise(void)
{
        struct drm_i915_private *dev_priv;
        bool ret = true;

        spin_lock(&mchdev_lock);
        if (!i915_mch_dev) {
                ret = false;
                goto out_unlock;
        }
        dev_priv = i915_mch_dev;

        if (dev_priv->max_delay > dev_priv->fmax)
                dev_priv->max_delay--;

out_unlock:
        spin_unlock(&mchdev_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_raise);
/**
 * i915_gpu_lower - lower GPU frequency limit
 *
 * IPS indicates we're close to a thermal limit, so throttle back the GPU
 * frequency maximum.
 */
bool i915_gpu_lower(void)
{
        struct drm_i915_private *dev_priv;
        bool ret = true;

        spin_lock(&mchdev_lock);
        if (!i915_mch_dev) {
                ret = false;
                goto out_unlock;
        }
        dev_priv = i915_mch_dev;

        if (dev_priv->max_delay < dev_priv->min_delay)
                dev_priv->max_delay++;

out_unlock:
        spin_unlock(&mchdev_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_lower);
/**
 * i915_gpu_busy - indicate GPU business to IPS
 *
 * Tell the IPS driver whether or not the GPU is busy.
 */
bool i915_gpu_busy(void)
{
        struct drm_i915_private *dev_priv;
        bool ret = false;

        spin_lock(&mchdev_lock);
        if (!i915_mch_dev)
                goto out_unlock;
        dev_priv = i915_mch_dev;

        ret = dev_priv->busy;

out_unlock:
        spin_unlock(&mchdev_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_busy);
/**
 * i915_gpu_turbo_disable - disable graphics turbo
 *
 * Disable graphics turbo by resetting the max frequency and setting the
 * current frequency to the default.
 */
bool i915_gpu_turbo_disable(void)
{
        struct drm_i915_private *dev_priv;
        bool ret = true;

        spin_lock(&mchdev_lock);
        if (!i915_mch_dev) {
                ret = false;
                goto out_unlock;
        }
        dev_priv = i915_mch_dev;

        dev_priv->max_delay = dev_priv->fstart;

        if (!ironlake_set_drps(dev_priv->dev, dev_priv->fstart))
                ret = false;

out_unlock:
        spin_unlock(&mchdev_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
/**
 * Tells the intel_ips driver that the i915 driver is now loaded, if
 * IPS got loaded first.
 *
 * This awkward dance is so that neither module has to depend on the
 * other in order for IPS to do the appropriate communication of
 * GPU turbo limits to i915.
 */
static void
ips_ping_for_i915_load(void)
{
        void (*link)(void);

        link = symbol_get(ips_link_to_i915_driver);
        if (link) {
                link();
                symbol_put(ips_link_to_i915_driver);
        }
}
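/*
 * symbol_get() returns NULL when intel_ips is not loaded, so the ping is a
 * no-op in that case; when it succeeds, the matching symbol_put() drops the
 * module reference again once the callback has run.
 */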
/**
 * i915_driver_load - setup chip and create an initial config
 * @dev: DRM device
 * @flags: startup flags
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
        struct drm_i915_private *dev_priv;
        int ret = 0, mmio_bar;
        uint32_t agp_size;

        /* i915 has 4 more counters */
        dev->counters += 4;
        dev->types[6] = _DRM_STAT_IRQ;
        dev->types[7] = _DRM_STAT_PRIMARY;
        dev->types[8] = _DRM_STAT_SECONDARY;
        dev->types[9] = _DRM_STAT_DMA;

        dev_priv = kzalloc(sizeof(drm_i915_private_t), GFP_KERNEL);
        if (dev_priv == NULL)
                return -ENOMEM;

        dev->dev_private = (void *)dev_priv;
        dev_priv->dev = dev;
        dev_priv->info = (struct intel_device_info *) flags;

        if (i915_get_bridge_dev(dev)) {
                ret = -EIO;
                goto free_priv;
        }

        /* overlay on gen2 is broken and can't address above 1G */
        if (IS_GEN2(dev))
                dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));

        mmio_bar = IS_GEN2(dev) ? 1 : 0;
        dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0);
        if (!dev_priv->regs) {
                DRM_ERROR("failed to map registers\n");
                ret = -EIO;
                goto put_bridge;
        }

        dev_priv->mm.gtt = intel_gtt_get();
        if (!dev_priv->mm.gtt) {
                DRM_ERROR("Failed to initialize GTT\n");
                ret = -ENODEV;
                goto out_rmmap;
        }

        agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;

        dev_priv->mm.gtt_mapping =
                io_mapping_create_wc(dev->agp->base, agp_size);
        if (dev_priv->mm.gtt_mapping == NULL) {
                ret = -EIO;
                goto out_rmmap;
        }

        /* Set up a WC MTRR for non-PAT systems.  This is more common than
         * one would think, because the kernel disables PAT on first
         * generation Core chips because WC PAT gets overridden by a UC
         * MTRR if present.  Even if a UC MTRR isn't present.
         */
        dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base,
                                         agp_size,
                                         MTRR_TYPE_WRCOMB, 1);
        if (dev_priv->mm.gtt_mtrr < 0) {
                DRM_INFO("MTRR allocation failed.  Graphics "
                         "performance may suffer.\n");
        }

        /* The i915 workqueue is primarily used for batched retirement of
         * requests (and thus managing bo) once the task has been completed
         * by the GPU. i915_gem_retire_requests() is called directly when we
         * need high-priority retirement, such as waiting for an explicit
         * bo.
         *
         * It is also used for periodic low-priority events, such as
         * idle-timers and recording error state.
         *
         * All tasks on the workqueue are expected to acquire the dev mutex
         * so there is no point in running more than one instance of the
         * workqueue at any time: max_active = 1 and NON_REENTRANT.
         */
        dev_priv->wq = alloc_workqueue("i915",
                                       WQ_UNBOUND | WQ_NON_REENTRANT,
                                       1);
        if (dev_priv->wq == NULL) {
                DRM_ERROR("Failed to create our workqueue.\n");
                ret = -ENOMEM;
                goto out_iomapfree;
        }

        /* enable GEM by default */
        dev_priv->has_gem = 1;

        dev->driver->get_vblank_counter = i915_get_vblank_counter;
        dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
        if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev)) {
                dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
                dev->driver->get_vblank_counter = gm45_get_vblank_counter;
        }

        /* Try to make sure MCHBAR is enabled before poking at it */
        intel_setup_mchbar(dev);
        intel_setup_gmbus(dev);
        intel_opregion_setup(dev);

        /* Make sure the bios did its job and set up vital registers */
        intel_setup_bios(dev);

        i915_gem_load(dev);

        /* Init HWS */
        if (!I915_NEED_GFX_HWS(dev)) {
                ret = i915_init_phys_hws(dev);
                if (ret)
                        goto out_gem_unload;
        }

        if (IS_PINEVIEW(dev))
                i915_pineview_get_mem_freq(dev);
        else if (IS_GEN5(dev))
                i915_ironlake_get_mem_freq(dev);

        /* On the 945G/GM, the chipset reports the MSI capability on the
         * integrated graphics even though the support isn't actually there
         * according to the published specs.  It doesn't appear to function
         * correctly in testing on 945G.
         * This may be a side effect of MSI having been made available for PEG
         * and the registers being closely associated.
         *
         * According to chipset errata, on the 965GM, MSI interrupts may
         * be lost or delayed, but we use them anyways to avoid
         * stuck interrupts on some machines.
         */
        if (!IS_I945G(dev) && !IS_I945GM(dev))
                pci_enable_msi(dev->pdev);

        spin_lock_init(&dev_priv->irq_lock);
        spin_lock_init(&dev_priv->error_lock);
        dev_priv->trace_irq_seqno = 0;

        ret = drm_vblank_init(dev, I915_NUM_PIPE);
        if (ret)
                goto out_gem_unload;

        /* Start out suspended */
        dev_priv->mm.suspended = 1;

        intel_detect_pch(dev);

        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                ret = i915_load_modeset_init(dev);
                if (ret < 0) {
                        DRM_ERROR("failed to init modeset\n");
                        goto out_gem_unload;
                }
        }

        /* Must be done after probing outputs */
        intel_opregion_init(dev);
        acpi_video_register();

        setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
                    (unsigned long) dev);

        spin_lock(&mchdev_lock);
        i915_mch_dev = dev_priv;
        dev_priv->mchdev_lock = &mchdev_lock;
        spin_unlock(&mchdev_lock);

        ips_ping_for_i915_load();

        return 0;

out_gem_unload:
        if (dev->pdev->msi_enabled)
                pci_disable_msi(dev->pdev);

        intel_teardown_gmbus(dev);
        intel_teardown_mchbar(dev);
        destroy_workqueue(dev_priv->wq);
out_iomapfree:
        io_mapping_free(dev_priv->mm.gtt_mapping);
out_rmmap:
        pci_iounmap(dev->pdev, dev_priv->regs);
put_bridge:
        pci_dev_put(dev_priv->bridge_dev);
free_priv:
        kfree(dev_priv);
        return ret;
}
int i915_driver_unload(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        spin_lock(&mchdev_lock);
        i915_mch_dev = NULL;
        spin_unlock(&mchdev_lock);

        if (dev_priv->mm.inactive_shrinker.shrink)
                unregister_shrinker(&dev_priv->mm.inactive_shrinker);

        mutex_lock(&dev->struct_mutex);
        ret = i915_gpu_idle(dev);
        if (ret)
                DRM_ERROR("failed to idle hardware: %d\n", ret);
        mutex_unlock(&dev->struct_mutex);

        /* Cancel the retire work handler, which should be idle now. */
        cancel_delayed_work_sync(&dev_priv->mm.retire_work);

        io_mapping_free(dev_priv->mm.gtt_mapping);
        if (dev_priv->mm.gtt_mtrr >= 0) {
                mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
                         dev->agp->agp_info.aper_size * 1024 * 1024);
                dev_priv->mm.gtt_mtrr = -1;
        }

        acpi_video_unregister();

        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                intel_fbdev_fini(dev);
                intel_modeset_cleanup(dev);

                /*
                 * free the memory space allocated for the child device
                 * config parsed from VBT
                 */
                if (dev_priv->child_dev && dev_priv->child_dev_num) {
                        kfree(dev_priv->child_dev);
                        dev_priv->child_dev = NULL;
                        dev_priv->child_dev_num = 0;
                }

                vga_switcheroo_unregister_client(dev->pdev);
                vga_client_register(dev->pdev, NULL, NULL, NULL);
        }

        /* Free error state after interrupts are fully disabled. */
        del_timer_sync(&dev_priv->hangcheck_timer);
        cancel_work_sync(&dev_priv->error_work);
        i915_destroy_error_state(dev);

        if (dev->pdev->msi_enabled)
                pci_disable_msi(dev->pdev);

        intel_opregion_fini(dev);

        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
                /* Flush any outstanding unpin_work. */
                flush_workqueue(dev_priv->wq);

                i915_gem_free_all_phys_object(dev);

                mutex_lock(&dev->struct_mutex);
                i915_gem_cleanup_ringbuffer(dev);
                mutex_unlock(&dev->struct_mutex);
                if (I915_HAS_FBC(dev) && i915_powersave)
                        i915_cleanup_compression(dev);
                drm_mm_takedown(&dev_priv->mm.stolen);

                intel_cleanup_overlay(dev);

                if (!I915_NEED_GFX_HWS(dev))
                        i915_free_hws(dev);
        }

        if (dev_priv->regs != NULL)
                pci_iounmap(dev->pdev, dev_priv->regs);

        intel_teardown_gmbus(dev);
        intel_teardown_mchbar(dev);

        destroy_workqueue(dev_priv->wq);

        pci_dev_put(dev_priv->bridge_dev);
        kfree(dev->dev_private);

        return 0;
}
int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
        struct drm_i915_file_private *file_priv;

        DRM_DEBUG_DRIVER("\n");
        file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
        if (!file_priv)
                return -ENOMEM;

        file->driver_priv = file_priv;

        spin_lock_init(&file_priv->mm.lock);
        INIT_LIST_HEAD(&file_priv->mm.request_list);

        return 0;
}
/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited.  In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the AGP
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
void i915_driver_lastclose(struct drm_device * dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) {
                drm_fb_helper_restore();
                vga_switcheroo_process_delayed_switch();
                return;
        }

        i915_gem_lastclose(dev);

        if (dev_priv->agp_heap)
                i915_mem_takedown(&(dev_priv->agp_heap));

        i915_dma_cleanup(dev);
}
void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        i915_gem_release(dev, file_priv);
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                i915_mem_release(dev, file_priv, dev_priv->agp_heap);
}
void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
        struct drm_i915_file_private *file_priv = file->driver_priv;

        kfree(file_priv);
}
struct drm_ioctl_desc i915_ioctls[] = {
        DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_ALLOC, i915_mem_alloc, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_FREE, i915_mem_free, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, i915_mem_init_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, i915_mem_destroy_heap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
        DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
        DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
        DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
};

int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
/**
 * Determine if the device really is AGP or not.
 *
 * All Intel graphics chipsets are treated as AGP, even if they are really
 * PCI-e.
 *
 * \param dev   The device to be tested.
 *
 * \returns
 * A value of 1 is always returned to indicate every i9x5 is AGP.
 */
int i915_driver_device_is_agp(struct drm_device * dev)
{
        return 1;
}