/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include <linux/pci.h>
#include <linux/vgaarb.h>
#include <linux/acpi.h>
#include <linux/pnp.h>
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <acpi/video.h>
#include <linux/pm_runtime.h>

#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])

#define BEGIN_LP_RING(n) \
	intel_ring_begin(LP_RING(dev_priv), (n))

#define OUT_RING(x) \
	intel_ring_emit(LP_RING(dev_priv), x)

#define ADVANCE_LP_RING() \
	__intel_ring_advance(LP_RING(dev_priv))

/**
 * Lock test for when it's just for synchronization of ring access.
 *
 * In that case, we don't need to do it when GEM is initialized as nobody else
 * has access to the ring.
 */
#define RING_LOCK_TEST_WITH_RETURN(dev, file) do {			\
	if (LP_RING(dev->dev_private)->obj == NULL)			\
		LOCK_TEST_WITH_RETURN(dev, file);			\
} while (0)

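/*
 * Usage sketch (illustrative only, not taken from this file's original text):
 * the three emission macros above are always used as a unit, with the dword
 * count passed to BEGIN_LP_RING() matching the number of OUT_RING() calls,
 * e.g.:
 *
 *	if (BEGIN_LP_RING(2) == 0) {
 *		OUT_RING(MI_FLUSH);
 *		OUT_RING(MI_NOOP);
 *		ADVANCE_LP_RING();
 *	}
 *
 * The same pattern appears verbatim in the dispatch helpers further down.
 */
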
static inline u32
intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
{
	if (I915_NEED_GFX_HWS(dev_priv->dev))
		return ioread32(dev_priv->dri1.gfx_hws_cpu_addr + reg);
	else
		return intel_read_status_page(LP_RING(dev_priv), reg);
}

#define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg)
#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
#define I915_BREADCRUMB_INDEX		0x21
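
/*
 * The breadcrumb is a plain dword slot in the hardware status page:
 * i915_emit_breadcrumb()/i915_emit_irq() below ask the ring to store the
 * software counter at dword index 0x21 (MI_STORE_DWORD_INDEX_SHIFT, normally
 * 2, turns that into the byte offset 0x21 << 2 = 0x84 in the emitted
 * command), and READ_BREADCRUMB() polls the same slot to see how far the
 * ring has progressed.
 */
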
void i915_update_dri1_breadcrumb(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;

	if (dev->primary->master) {
		master_priv = dev->primary->master->driver_priv;
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch =
				READ_BREADCRUMB(dev_priv);
	}
}

static void i915_write_hws_pga(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 addr;

	addr = dev_priv->status_page_dmah->busaddr;
	if (INTEL_INFO(dev)->gen >= 4)
		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
	I915_WRITE(HWS_PGA, addr);
}
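
/*
 * Worked example for the gen4+ branch above (illustrative numbers): with a
 * bus address of 0x123456000, (busaddr >> 28) & 0xf0 evaluates to 0x10, i.e.
 * physical-address bits 35:32 end up in bits 7:4 of the HWS_PGA value while
 * the page-aligned low 32 bits are written as-is, allowing a status page
 * above 4GB to be programmed on hardware that supports it.
 */
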
/**
 * Frees the hardware status page, whether it's a physical address or a virtual
 * address set up by the X Server.
 */
static void i915_free_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	if (dev_priv->status_page_dmah) {
		drm_pci_free(dev, dev_priv->status_page_dmah);
		dev_priv->status_page_dmah = NULL;
	}

	if (ring->status_page.gfx_addr) {
		ring->status_page.gfx_addr = 0;
		iounmap(dev_priv->dri1.gfx_hws_cpu_addr);
	}

	/* Need to rewrite hardware status page */
	I915_WRITE(HWS_PGA, 0x1ffff000);
}

void i915_kernel_lost_context(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	/*
	 * We should never lose context on the ring with modesetting
	 * as we don't expose it to userspace
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
	ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
	ring->space = ring->head - (ring->tail + I915_RING_FREE_SPACE);
	if (ring->space < 0)
		ring->space += ring->size;

	if (!dev->primary->master)
		return;

	master_priv = dev->primary->master->driver_priv;
	if (ring->head == ring->tail && master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}
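
/*
 * The free-space computation above treats the ring as a circular buffer:
 * space = head - (tail + I915_RING_FREE_SPACE), wrapping by ring->size when
 * the result goes negative.  For instance (illustrative numbers only), with
 * a 64 KiB ring, head = 0x1000, tail = 0x8000 and a reserved slack of 64
 * bytes, the first expression gives 0x1000 - 0x8040 < 0, so ring->size is
 * added to yield 0x8fc0 bytes of usable space.
 */
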
static int i915_dma_cleanup(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	mutex_lock(&dev->struct_mutex);
	for (i = 0; i < I915_NUM_RINGS; i++)
		intel_cleanup_ring_buffer(&dev_priv->ring[i]);
	mutex_unlock(&dev->struct_mutex);

	/* Clear the HWS virtual address at teardown */
	if (I915_NEED_GFX_HWS(dev))
		i915_free_hws(dev);

	return 0;
}

static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret;

	master_priv->sarea = drm_getsarea(dev);
	if (master_priv->sarea) {
		master_priv->sarea_priv = (drm_i915_sarea_t *)
			((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
	} else {
		DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
	}

	if (init->ring_size != 0) {
		if (LP_RING(dev_priv)->obj != NULL) {
			i915_dma_cleanup(dev);
			DRM_ERROR("Client tried to initialize ringbuffer in "
				  "GEM mode\n");
			return -EINVAL;
		}

		ret = intel_render_ring_init_dri(dev,
						 init->ring_start,
						 init->ring_size);
		if (ret) {
			i915_dma_cleanup(dev);
			return ret;
		}
	}

	dev_priv->dri1.cpp = init->cpp;
	dev_priv->dri1.back_offset = init->back_offset;
	dev_priv->dri1.front_offset = init->front_offset;
	dev_priv->dri1.current_page = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->pf_current_page = 0;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->dri1.allow_batchbuffer = 1;

	return 0;
}

static int i915_dma_resume(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	DRM_DEBUG_DRIVER("%s\n", __func__);

	if (ring->virtual_start == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	/* Program Hardware Status Page */
	if (!ring->status_page.page_addr) {
		DRM_ERROR("Can not find hardware status page\n");
		return -EINVAL;
	}
	DRM_DEBUG_DRIVER("hw status page @ %p\n",
			 ring->status_page.page_addr);
	if (ring->status_page.gfx_addr != 0)
		intel_ring_setup_status_page(ring);
	else
		i915_write_hws_pga(dev);

	DRM_DEBUG_DRIVER("Enabled hardware status page\n");

	return 0;
}

static int i915_dma_init(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_init_t *init = data;
	int retcode = 0;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	switch (init->func) {
	case I915_INIT_DMA:
		retcode = i915_initialize(dev, init);
		break;
	case I915_CLEANUP_DMA:
		retcode = i915_dma_cleanup(dev);
		break;
	case I915_RESUME_DMA:
		retcode = i915_dma_resume(dev);
		break;
	default:
		retcode = -EINVAL;
		break;
	}

	return retcode;
}

/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
static int validate_cmd(int cmd)
{
	switch (((cmd >> 29) & 0x7)) {
	case 0x0:
		switch ((cmd >> 23) & 0x3f) {
		case 0x0:
			return 1;	/* MI_NOOP */
		case 0x4:
			return 1;	/* MI_FLUSH */
		default:
			return 0;	/* disallow everything else */
		}
		break;
	case 0x1:
		return 0;	/* reserved */
	case 0x2:
		return (cmd & 0xff) + 2;	/* 2d commands */
	case 0x3:
		if (((cmd >> 24) & 0x1f) <= 0x18)
			return 1;

		switch ((cmd >> 24) & 0x1f) {
		case 0x1c:
			return 1;
		case 0x1d:
			switch ((cmd >> 16) & 0xff) {
			case 0x3:
				return (cmd & 0x1f) + 2;
			case 0x4:
				return (cmd & 0xf) + 2;
			default:
				return (cmd & 0xffff) + 2;
			}
		case 0x1e:
			if (cmd & (1 << 23))
				return (cmd & 0xffff) + 1;
			else
				return 1;
		case 0x1f:
			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
				return (cmd & 0x1ffff) + 2;
			else if (cmd & (1 << 17))	/* indirect random */
				if ((cmd & 0xffff) == 0)
					return 0;	/* unknown length, too hard */
				else
					return (((cmd & 0xffff) + 1) / 2) + 1;
			else
				return 2;	/* indirect sequential */
		default:
			return 0;
		}
	default:
		return 0;
	}

	return 0;
}
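
/*
 * Length decoding example (illustrative): for a hypothetical 2D command word
 * 0x54000006 the client field (cmd >> 29) & 0x7 is 0x2, so the instruction
 * length is (cmd & 0xff) + 2 = 8 dwords; the scanner in i915_emit_cmds()
 * then skips ahead by that many dwords before validating the next command.
 * A return value of 0 causes the whole buffer to be rejected.
 */
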
static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i, ret;

	if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
		return -EINVAL;

	for (i = 0; i < dwords;) {
		int sz = validate_cmd(buffer[i]);
		if (sz == 0 || i + sz > dwords)
			return -EINVAL;
		i += sz;
	}

	ret = BEGIN_LP_RING((dwords+1)&~1);
	if (ret)
		return ret;

	for (i = 0; i < dwords; i++)
		OUT_RING(buffer[i]);
	if (dwords & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();

	return 0;
}

int
i915_emit_box(struct drm_device *dev,
	      struct drm_clip_rect *box,
	      int DR1, int DR4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
	    box->y2 <= 0 || box->x2 <= 0) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
			  box->x1, box->y1, box->x2, box->y2);
		return -EINVAL;
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		ret = BEGIN_LP_RING(4);
		if (ret)
			return ret;

		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
		OUT_RING(DR4);
	} else {
		ret = BEGIN_LP_RING(6);
		if (ret)
			return ret;

		OUT_RING(GFX_OP_DRAWRECT_INFO);
		OUT_RING(DR1);
		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
		OUT_RING(DR4);
		OUT_RING(0);
	}
	ADVANCE_LP_RING();

	return 0;
}

/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit. For now, do it in both places:
 */

static void i915_emit_breadcrumb(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	dev_priv->dri1.counter++;
	if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
		dev_priv->dri1.counter = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->dri1.counter);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}
}
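
/*
 * The four dwords emitted above ask the command streamer to store
 * dev_priv->dri1.counter into the hardware status page at
 * I915_BREADCRUMB_INDEX once the preceding commands have completed; the
 * trailing zero dword is effectively an MI_NOOP that keeps the emission an
 * even number of dwords.  READ_BREADCRUMB() later compares the stored value
 * against the counter to tell how far the ring has progressed.
 */
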
static int i915_dispatch_cmdbuffer(struct drm_device * dev,
				   drm_i915_cmdbuffer_t *cmd,
				   struct drm_clip_rect *cliprects,
				   void *cmdbuf)
{
	int nbox = cmd->num_cliprects;
	int i = 0, count, ret;

	if (cmd->sz & 0x3) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, &cliprects[i],
					    cmd->DR1, cmd->DR4);
			if (ret)
				return ret;
		}

		ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
		if (ret)
			return ret;
	}

	i915_emit_breadcrumb(dev);
	return 0;
}

static int i915_dispatch_batchbuffer(struct drm_device * dev,
				     drm_i915_batchbuffer_t * batch,
				     struct drm_clip_rect *cliprects)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int nbox = batch->num_cliprects;
	int i, count, ret;

	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;
	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, &cliprects[i],
					    batch->DR1, batch->DR4);
			if (ret)
				return ret;
		}

		if (!IS_I830(dev) && !IS_845G(dev)) {
			ret = BEGIN_LP_RING(2);
			if (ret)
				return ret;

			if (INTEL_INFO(dev)->gen >= 4) {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
				OUT_RING(batch->start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			}
		} else {
			ret = BEGIN_LP_RING(4);
			if (ret)
				return ret;

			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			OUT_RING(batch->start + batch->used - 4);
			OUT_RING(0);
		}
		ADVANCE_LP_RING();
	}

	if (IS_G4X(dev) || IS_GEN5(dev)) {
		if (BEGIN_LP_RING(2) == 0) {
			OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
			OUT_RING(MI_NOOP);
			ADVANCE_LP_RING();
		}
	}

	i915_emit_breadcrumb(dev);
	return 0;
}
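
/*
 * Note on the two dispatch paths above: where supported, the batch is chained
 * with MI_BATCH_BUFFER_START plus the non-secure bit (or its I965 variant) so
 * the hardware itself applies the unprivileged command checks; on i830/845g
 * the older MI_BATCH_BUFFER form is used instead, which takes an explicit
 * start and end address (batch->start + batch->used - 4 being the last valid
 * dword of the batch).
 */
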
static int i915_dispatch_flip(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv =
		dev->primary->master->driver_priv;
	int ret;

	if (!master_priv->sarea_priv)
		return -EINVAL;

	DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
			 __func__,
			 dev_priv->dri1.current_page,
			 master_priv->sarea_priv->pf_current_page);

	i915_kernel_lost_context(dev);

	ret = BEGIN_LP_RING(10);
	if (ret)
		return ret;

	OUT_RING(MI_FLUSH | MI_READ_FLUSH);
	OUT_RING(0);

	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
	OUT_RING(0);
	if (dev_priv->dri1.current_page == 0) {
		OUT_RING(dev_priv->dri1.back_offset);
		dev_priv->dri1.current_page = 1;
	} else {
		OUT_RING(dev_priv->dri1.front_offset);
		dev_priv->dri1.current_page = 0;
	}
	OUT_RING(0);

	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
	OUT_RING(0);

	ADVANCE_LP_RING();

	master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter++;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->dri1.counter);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}

	master_priv->sarea_priv->pf_current_page = dev_priv->dri1.current_page;
	return 0;
}

static int i915_quiescent(struct drm_device *dev)
{
	i915_kernel_lost_context(dev);
	return intel_ring_idle(LP_RING(dev->dev_private));
}

static int i915_flush_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	ret = i915_quiescent(dev);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

static int i915_batchbuffer(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    master_priv->sarea_priv;
	drm_i915_batchbuffer_t *batch = data;
	int ret;
	struct drm_clip_rect *cliprects = NULL;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!dev_priv->dri1.allow_batchbuffer) {
		DRM_ERROR("Batchbuffer ioctl disabled\n");
		return -EINVAL;
	}

	DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
			 batch->start, batch->used, batch->num_cliprects);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (batch->num_cliprects < 0)
		return -EINVAL;

	if (batch->num_cliprects) {
		cliprects = kcalloc(batch->num_cliprects,
				    sizeof(*cliprects),
				    GFP_KERNEL);
		if (cliprects == NULL)
			return -ENOMEM;

		ret = copy_from_user(cliprects, batch->cliprects,
				     batch->num_cliprects *
				     sizeof(struct drm_clip_rect));
		if (ret != 0) {
			ret = -EFAULT;
			goto fail_free;
		}
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
	mutex_unlock(&dev->struct_mutex);

	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_free:
	kfree(cliprects);

	return ret;
}

static int i915_cmdbuffer(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    master_priv->sarea_priv;
	drm_i915_cmdbuffer_t *cmdbuf = data;
	struct drm_clip_rect *cliprects = NULL;
	void *batch_data;
	int ret;

	DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
			 cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (cmdbuf->num_cliprects < 0)
		return -EINVAL;

	batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL);
	if (batch_data == NULL)
		return -ENOMEM;

	ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
	if (ret != 0) {
		ret = -EFAULT;
		goto fail_batch_free;
	}

	if (cmdbuf->num_cliprects) {
		cliprects = kcalloc(cmdbuf->num_cliprects,
				    sizeof(*cliprects), GFP_KERNEL);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto fail_batch_free;
		}

		ret = copy_from_user(cliprects, cmdbuf->cliprects,
				     cmdbuf->num_cliprects *
				     sizeof(struct drm_clip_rect));
		if (ret != 0) {
			ret = -EFAULT;
			goto fail_clip_free;
		}
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
	mutex_unlock(&dev->struct_mutex);
	if (ret) {
		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
		goto fail_clip_free;
	}

	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_clip_free:
	kfree(cliprects);
fail_batch_free:
	kfree(batch_data);

	return ret;
}

static int i915_emit_irq(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	i915_kernel_lost_context(dev);

	DRM_DEBUG_DRIVER("\n");

	dev_priv->dri1.counter++;
	if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
		dev_priv->dri1.counter = 1;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->dri1.counter);
		OUT_RING(MI_USER_INTERRUPT);
		ADVANCE_LP_RING();
	}

	return dev_priv->dri1.counter;
}

static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret = 0;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
		  READ_BREADCRUMB(dev_priv));

	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
		return 0;
	}

	if (master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

	if (ring->irq_get(ring)) {
		DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
			    READ_BREADCRUMB(dev_priv) >= irq_nr);
		ring->irq_put(ring);
	} else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
		ret = -EBUSY;

	if (ret == -EBUSY) {
		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
			  READ_BREADCRUMB(dev_priv), (int)dev_priv->dri1.counter);
	}

	return ret;
}

/* Needs the lock as it touches the ring.
 */
static int i915_irq_emit(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_emit_t *emit = data;
	int result;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	result = i915_emit_irq(dev);
	mutex_unlock(&dev->struct_mutex);

	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}

	return 0;
}

/* Doesn't need the hardware lock.
 */
static int i915_irq_wait(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_wait_t *irqwait = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	return i915_wait_irq(dev, irqwait->irq_seq);
}

static int i915_vblank_pipe_get(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_vblank_pipe_t *pipe = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

	return 0;
}

/**
 * Schedule buffer swap at given vertical blank.
 */
static int i915_vblank_swap(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	/* The delayed swap mechanism was fundamentally racy, and has been
	 * removed.  The model was that the client requested a delayed flip/swap
	 * from the kernel, then waited for vblank before continuing to perform
	 * rendering.  The problem was that the kernel might wake the client
	 * up before it dispatched the vblank swap (since the lock has to be
	 * held while touching the ringbuffer), in which case the client would
	 * clear and start the next frame before the swap occurred, and
	 * flicker would occur in addition to likely missing the vblank.
	 *
	 * In the absence of this ioctl, userland falls back to a correct path
	 * of waiting for a vblank, then dispatching the swap on its own.
	 * Context switching to userland and back is plenty fast enough for
	 * meeting the requirements of vblank swapping.
	 */
	return -EINVAL;
}

static int i915_flip_bufs(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	DRM_DEBUG_DRIVER("%s\n", __func__);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_flip(dev);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

static int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_getparam_t *param = data;
	int value;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -ENODEV;
	}

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
		value = dev->pdev->irq ? 1 : 0;
		break;
	case I915_PARAM_ALLOW_BATCHBUFFER:
		value = dev_priv->dri1.allow_batchbuffer ? 1 : 0;
		break;
	case I915_PARAM_LAST_DISPATCH:
		value = READ_BREADCRUMB(dev_priv);
		break;
	case I915_PARAM_CHIPSET_ID:
		value = dev->pdev->device;
		break;
	case I915_PARAM_HAS_GEM:
		value = 1;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
		break;
	case I915_PARAM_HAS_OVERLAY:
		value = dev_priv->overlay ? 1 : 0;
		break;
	case I915_PARAM_HAS_PAGEFLIPPING:
		value = 1;
		break;
	case I915_PARAM_HAS_EXECBUF2:
		/* depends on GEM */
		value = 1;
		break;
	case I915_PARAM_HAS_BSD:
		value = intel_ring_initialized(&dev_priv->ring[VCS]);
		break;
	case I915_PARAM_HAS_BLT:
		value = intel_ring_initialized(&dev_priv->ring[BCS]);
		break;
	case I915_PARAM_HAS_VEBOX:
		value = intel_ring_initialized(&dev_priv->ring[VECS]);
		break;
	case I915_PARAM_HAS_RELAXED_FENCING:
		value = 1;
		break;
	case I915_PARAM_HAS_COHERENT_RINGS:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_CONSTANTS:
		value = INTEL_INFO(dev)->gen >= 4;
		break;
	case I915_PARAM_HAS_RELAXED_DELTA:
		value = 1;
		break;
	case I915_PARAM_HAS_GEN7_SOL_RESET:
		value = 1;
		break;
	case I915_PARAM_HAS_LLC:
		value = HAS_LLC(dev);
		break;
	case I915_PARAM_HAS_WT:
		value = HAS_WT(dev);
		break;
	case I915_PARAM_HAS_ALIASING_PPGTT:
		value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
		break;
	case I915_PARAM_HAS_WAIT_TIMEOUT:
		value = 1;
		break;
	case I915_PARAM_HAS_SEMAPHORES:
		value = i915_semaphore_is_enabled(dev);
		break;
	case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
		value = 1;
		break;
	case I915_PARAM_HAS_SECURE_BATCHES:
		value = capable(CAP_SYS_ADMIN);
		break;
	case I915_PARAM_HAS_PINNED_BATCHES:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_NO_RELOC:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_HANDLE_LUT:
		value = 1;
		break;
	default:
		DRM_DEBUG("Unknown parameter %d\n", param->param);
		return -EINVAL;
	}

	if (DRM_COPY_TO_USER(param->value, &value, sizeof(int))) {
		DRM_ERROR("DRM_COPY_TO_USER failed\n");
		return -EFAULT;
	}

	return 0;
}

static int i915_setparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_setparam_t *param = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -ENODEV;
	}

	switch (param->param) {
	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
		break;
	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
		break;
	case I915_SETPARAM_ALLOW_BATCHBUFFER:
		dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0;
		break;
	case I915_SETPARAM_NUM_USED_FENCES:
		if (param->value > dev_priv->num_fence_regs ||
		    param->value < 0)
			return -EINVAL;
		/* Userspace can use first N regs */
		dev_priv->fence_reg_start = param->value;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown parameter %d\n",
				 param->param);
		return -EINVAL;
	}

	return 0;
}

static int i915_set_status_page(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_hws_addr_t *hws = data;
	struct intel_ring_buffer *ring;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!I915_NEED_GFX_HWS(dev))
		return -EINVAL;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		WARN(1, "tried to set status page when mode setting active\n");
		return 0;
	}

	DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);

	ring = LP_RING(dev_priv);
	ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);

	dev_priv->dri1.gfx_hws_cpu_addr =
		ioremap_wc(dev_priv->gtt.mappable_base + hws->addr, 4096);
	if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
		i915_dma_cleanup(dev);
		ring->status_page.gfx_addr = 0;
		DRM_ERROR("can not ioremap virtual address for"
				" G33 hw status page\n");
		return -ENOMEM;
	}

	memset_io(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE);
	I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);

	DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
			 ring->status_page.gfx_addr);
	DRM_DEBUG_DRIVER("load hws at %p\n",
			 ring->status_page.page_addr);
	return 0;
}

static int i915_get_bridge_dev(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
	if (!dev_priv->bridge_dev) {
		DRM_ERROR("bridge device not found\n");
		return -1;
	}
	return 0;
}

#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)

#define DEVEN_REG 0x54
#define DEVEN_MCHBAR_EN (1 << 28)

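/*
 * MCHBAR is a 64-bit base address split across two 32-bit config dwords in
 * the host bridge (0x48/0x4c on gen4+, a single dword at 0x44 on older
 * parts).  intel_alloc_mchbar_resource() below reassembles it as
 * ((u64)temp_hi << 32) | temp_lo; e.g. temp_hi = 0x0 and temp_lo = 0xfed10000
 * (illustrative values) describe an MCHBAR window of MCHBAR_SIZE bytes
 * starting at 0xfed10000.
 */
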
/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret;

	if (INTEL_INFO(dev)->gen >= 4)
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	dev_priv->mch_res.name = "i915 MCHBAR";
	dev_priv->mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
				     &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
		dev_priv->mch_res.start = 0;
		return ret;
	}

	if (INTEL_INFO(dev)->gen >= 4)
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
	return 0;
}

/* Setup MCHBAR if possible, return true if we should disable it again */
static void
intel_setup_mchbar(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}

static void
intel_teardown_mchbar(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev) || IS_I915GM(dev)) {
			pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
			temp &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
		} else {
			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
			temp &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}

/* true = enable decode, false = disable decode */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
	struct drm_device *dev = cookie;

	intel_modeset_vga_set_state(dev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
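
/*
 * The flag combinations returned above tell the VGA arbiter what this device
 * still decodes: with decode enabled the GPU claims both the legacy VGA
 * I/O/memory ranges and its normal PCI resources; with decode disabled only
 * the normal (non-legacy) resources remain, so another GPU can own the
 * legacy VGA space.
 */
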
static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
	if (state == VGA_SWITCHEROO_ON) {
		pr_info("switched on\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		/* i915 resume handler doesn't set to D0 */
		pci_set_power_state(dev->pdev, PCI_D0);
		i915_resume(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		pr_err("switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		i915_suspend(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}

static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	bool can_switch;

	spin_lock(&dev->count_lock);
	can_switch = (dev->open_count == 0);
	spin_unlock(&dev->count_lock);
	return can_switch;
}

static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
	.set_gpu_state = i915_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = i915_switcheroo_can_switch,
};

static int i915_load_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_parse_bios(dev);
	if (ret)
		DRM_INFO("failed to find VBIOS tables\n");

	/* If we have > 1 VGA cards, then we need to arbitrate access
	 * to the common VGA resources.
	 *
	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
	 * then we do not take part in VGA arbitration and the
	 * vga_client_register() fails with -ENODEV.
	 */
	ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
	if (ret && ret != -ENODEV)
		goto out;

	intel_register_dsm_handler();

	ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false);
	if (ret)
		goto cleanup_vga_client;

	/* Initialise stolen first so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	ret = i915_gem_init_stolen(dev);
	if (ret)
		goto cleanup_vga_switcheroo;

	ret = drm_irq_install(dev);
	if (ret)
		goto cleanup_gem_stolen;

	intel_power_domains_init_hw(dev);

	/* Important: The output setup functions called by modeset_init need
	 * working irqs for e.g. gmbus and dp aux transfers. */
	intel_modeset_init(dev);

	ret = i915_gem_init(dev);
	if (ret)
		goto cleanup_power;

	INIT_WORK(&dev_priv->console_resume_work, intel_console_resume);

	intel_modeset_gem_init(dev);

	/* Always safe in the mode setting case. */
	/* FIXME: do pre/post-mode set stuff in core KMS code */
	dev->vblank_disable_allowed = true;
	if (INTEL_INFO(dev)->num_pipes == 0) {
		intel_display_power_put(dev, POWER_DOMAIN_VGA);
		return 0;
	}

	ret = intel_fbdev_init(dev);
	if (ret)
		goto cleanup_gem;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(dev);

	/*
	 * Some ports require correctly set-up hpd registers for detection to
	 * work properly (leading to ghost connected connector status), e.g. VGA
	 * on gm45.  Hence we can only set up the initial fbdev config after hpd
	 * irqs are fully enabled. Now we should scan for the initial config
	 * only once hotplug handling is enabled, but due to screwed-up locking
	 * around kms/fbdev init we can't protect the fbdev initial config
	 * scanning against hotplug events. Hence do this first and ignore the
	 * tiny window where we will lose hotplug notifications.
	 */
	intel_fbdev_initial_config(dev);

	/* Only enable hotplug handling once the fbdev is fully set up. */
	dev_priv->enable_hotplug_processing = true;

	drm_kms_helper_poll_init(dev);

	return 0;

cleanup_gem:
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	i915_gem_context_fini(dev);
	mutex_unlock(&dev->struct_mutex);
	i915_gem_cleanup_aliasing_ppgtt(dev);
	drm_mm_takedown(&dev_priv->gtt.base.mm);
cleanup_power:
	intel_display_power_put(dev, POWER_DOMAIN_VGA);
	drm_irq_uninstall(dev);
cleanup_gem_stolen:
	i915_gem_cleanup_stolen(dev);
cleanup_vga_switcheroo:
	vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
	vga_client_register(dev->pdev, NULL, NULL, NULL);
out:
	return ret;
}

int i915_master_create(struct drm_device *dev, struct drm_master *master)
{
	struct drm_i915_master_private *master_priv;

	master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL);
	if (!master_priv)
		return -ENOMEM;

	master->driver_priv = master_priv;
	return 0;
}

void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
{
	struct drm_i915_master_private *master_priv = master->driver_priv;

	if (!master_priv)
		return;

	kfree(master_priv);

	master->driver_priv = NULL;
}

#ifdef CONFIG_DRM_I915_FBDEV
static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	struct apertures_struct *ap;
	struct pci_dev *pdev = dev_priv->dev->pdev;
	bool primary;

	ap = alloc_apertures(1);
	if (!ap)
		return;

	ap->ranges[0].base = dev_priv->gtt.mappable_base;
	ap->ranges[0].size = dev_priv->gtt.mappable_end;

	primary =
		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;

	remove_conflicting_framebuffers(ap, "inteldrmfb", primary);

	kfree(ap);
}
#else
static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
}
#endif

static void i915_dump_device_info(struct drm_i915_private *dev_priv)
{
	const struct intel_device_info *info = dev_priv->info;

#define PRINT_S(name) "%s"
#define SEP_EMPTY
#define PRINT_FLAG(name) info->name ? #name "," : ""
#define SEP_COMMA ,
	DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags="
			 DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
			 info->gen,
			 dev_priv->dev->pdev->device,
			 DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
#undef PRINT_S
#undef SEP_EMPTY
#undef PRINT_FLAG
#undef SEP_COMMA
}
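
/*
 * Sketch of how the helpers above expand (assuming DEV_INFO_FOR_EACH_FLAG()
 * from i915_drv.h applies its first argument to every feature flag):
 * PRINT_S(is_mobile) contributes a "%s" to the format string, while
 * PRINT_FLAG(is_mobile) contributes 'info->is_mobile ? "is_mobile," : ""' to
 * the argument list, so the debug line ends up printing only the flags that
 * are actually set for this device.
 */
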
/**
 * i915_driver_load - setup chip and create an initial config
 * @dev: DRM device
 * @flags: startup flags
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_i915_private *dev_priv;
	struct intel_device_info *info;
	int ret = 0, mmio_bar, mmio_size;
	uint32_t aperture_size;

	info = (struct intel_device_info *) flags;

	/* Refuse to load on gen6+ without kms enabled. */
	if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) {
		DRM_INFO("Your hardware requires kernel modesetting (KMS)\n");
		DRM_INFO("See CONFIG_DRM_I915_KMS, nomodeset, and i915.modeset parameters\n");
		return -ENODEV;
	}

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (dev_priv == NULL)
		return -ENOMEM;

	dev->dev_private = (void *)dev_priv;
	dev_priv->dev = dev;
	dev_priv->info = info;

	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	spin_lock_init(&dev_priv->backlight_lock);
	spin_lock_init(&dev_priv->uncore.lock);
	spin_lock_init(&dev_priv->mm.object_stat_lock);
	mutex_init(&dev_priv->dpio_lock);
	mutex_init(&dev_priv->rps.hw_lock);
	mutex_init(&dev_priv->modeset_restore_lock);

	mutex_init(&dev_priv->pc8.lock);
	dev_priv->pc8.requirements_met = false;
	dev_priv->pc8.gpu_idle = false;
	dev_priv->pc8.irqs_disabled = false;
	dev_priv->pc8.enabled = false;
	dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
	INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);

	intel_display_crc_init(dev);

	i915_dump_device_info(dev_priv);

	/* Not all pre-production machines fall into this category, only the
	 * very first ones. Almost everything should work, except for maybe
	 * suspend/resume. And we don't implement workarounds that affect only
	 * pre-production machines. */
	if (IS_HSW_EARLY_SDV(dev))
		DRM_INFO("This is an early pre-production Haswell machine. "
			 "It may not be fully functional.\n");

	if (i915_get_bridge_dev(dev)) {
		ret = -EIO;
		goto free_priv;
	}

	mmio_bar = IS_GEN2(dev) ? 1 : 0;
	/* Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap from
	 * clobbering the GTT which we want ioremap_wc instead. Fortunately,
	 * the register BAR remains the same size for all the earlier
	 * generations up to Ironlake.
	 */
	if (info->gen < 5)
		mmio_size = 512*1024;
	else
		mmio_size = 2*1024*1024;

	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
	if (!dev_priv->regs) {
		DRM_ERROR("failed to map registers\n");
		ret = -EIO;
		goto put_bridge;
	}

	intel_uncore_early_sanitize(dev);

	/* This must be called before any calls to HAS_PCH_* */
	intel_detect_pch(dev);

	intel_uncore_init(dev);

	ret = i915_gem_gtt_init(dev);
	if (ret)
		goto out_regs;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		i915_kick_out_firmware_fb(dev_priv);

	pci_set_master(dev->pdev);

	/* overlay on gen2 is broken and can't address above 1G */
	if (IS_GEN2(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));

	/* 965GM sometimes incorrectly writes to hardware status page (HWS)
	 * using 32bit addressing, overwriting memory if HWS is located
	 * above 4GB.
	 *
	 * The documentation also mentions an issue with undefined
	 * behaviour if any general state is accessed within a page above 4GB,
	 * which also needs to be handled carefully.
	 */
	if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
		dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));

	aperture_size = dev_priv->gtt.mappable_end;

	dev_priv->gtt.mappable =
		io_mapping_create_wc(dev_priv->gtt.mappable_base,
				     aperture_size);
	if (dev_priv->gtt.mappable == NULL) {
		ret = -EIO;
		goto out_gtt;
	}

	dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
					      aperture_size);

	/* The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_gem_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time.  Use an ordered one.
	 */
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL) {
		DRM_ERROR("Failed to create our workqueue.\n");
		ret = -ENOMEM;
		goto out_mtrrfree;
	}

	intel_irq_init(dev);

	intel_uncore_sanitize(dev);

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev);
	intel_setup_gmbus(dev);
	intel_opregion_setup(dev);

	intel_setup_bios(dev);

	i915_gem_load(dev);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs.  It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, but we use them anyways to avoid
	 * stuck interrupts on some machines.
	 */
	if (!IS_I945G(dev) && !IS_I945GM(dev))
		pci_enable_msi(dev->pdev);

	dev_priv->num_plane = 1;
	if (IS_VALLEYVIEW(dev))
		dev_priv->num_plane = 2;

	if (INTEL_INFO(dev)->num_pipes) {
		ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
		if (ret)
			goto out_gem_unload;
	}

	intel_power_domains_init(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = i915_load_modeset_init(dev);
		if (ret < 0) {
			DRM_ERROR("failed to init modeset\n");
			goto out_power_well;
		}
	} else {
		/* Start out suspended in ums mode. */
		dev_priv->ums.mm_suspended = 1;
	}

	i915_setup_sysfs(dev);

	if (INTEL_INFO(dev)->num_pipes) {
		/* Must be done after probing outputs */
		intel_opregion_init(dev);
		acpi_video_register();
	}

	if (IS_GEN5(dev))
		intel_gpu_ips_init(dev_priv);

	intel_init_runtime_pm(dev_priv);

	return 0;

out_power_well:
	intel_power_domains_remove(dev);
	drm_vblank_cleanup(dev);
out_gem_unload:
	if (dev_priv->mm.inactive_shrinker.scan_objects)
		unregister_shrinker(&dev_priv->mm.inactive_shrinker);

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	intel_teardown_gmbus(dev);
	intel_teardown_mchbar(dev);
	destroy_workqueue(dev_priv->wq);
out_mtrrfree:
	arch_phys_wc_del(dev_priv->gtt.mtrr);
	io_mapping_free(dev_priv->gtt.mappable);
out_gtt:
	list_del(&dev_priv->gtt.base.global_link);
	drm_mm_takedown(&dev_priv->gtt.base.mm);
	dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
out_regs:
	intel_uncore_fini(dev);
	pci_iounmap(dev->pdev, dev_priv->regs);
put_bridge:
	pci_dev_put(dev_priv->bridge_dev);
free_priv:
	if (dev_priv->slab)
		kmem_cache_destroy(dev_priv->slab);
	kfree(dev_priv);
	return ret;
}

int i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_suspend(dev);
	if (ret) {
		DRM_ERROR("failed to idle hardware: %d\n", ret);
		return ret;
	}

	intel_fini_runtime_pm(dev_priv);

	intel_gpu_ips_teardown();

	/* The i915.ko module is still not prepared to be loaded when
	 * the power well is not enabled, so just enable it in case
	 * we're going to unload/reload. */
	intel_display_set_init_power(dev, true);
	intel_power_domains_remove(dev);

	i915_teardown_sysfs(dev);

	if (dev_priv->mm.inactive_shrinker.scan_objects)
		unregister_shrinker(&dev_priv->mm.inactive_shrinker);

	io_mapping_free(dev_priv->gtt.mappable);
	arch_phys_wc_del(dev_priv->gtt.mtrr);

	acpi_video_unregister();

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_fbdev_fini(dev);
		intel_modeset_cleanup(dev);
		cancel_work_sync(&dev_priv->console_resume_work);

		/*
		 * free the memory space allocated for the child device
		 * config parsed from VBT
		 */
		if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
			kfree(dev_priv->vbt.child_dev);
			dev_priv->vbt.child_dev = NULL;
			dev_priv->vbt.child_dev_num = 0;
		}

		vga_switcheroo_unregister_client(dev->pdev);
		vga_client_register(dev->pdev, NULL, NULL, NULL);
	}

	/* Free error state after interrupts are fully disabled. */
	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
	cancel_work_sync(&dev_priv->gpu_error.work);
	i915_destroy_error_state(dev);

	cancel_delayed_work_sync(&dev_priv->pc8.enable_work);

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	intel_opregion_fini(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Flush any outstanding unpin_work. */
		flush_workqueue(dev_priv->wq);

		mutex_lock(&dev->struct_mutex);
		i915_gem_free_all_phys_object(dev);
		i915_gem_cleanup_ringbuffer(dev);
		i915_gem_context_fini(dev);
		mutex_unlock(&dev->struct_mutex);
		i915_gem_cleanup_aliasing_ppgtt(dev);
		i915_gem_cleanup_stolen(dev);

		if (!I915_NEED_GFX_HWS(dev))
			i915_free_hws(dev);
	}

	list_del(&dev_priv->gtt.base.global_link);
	WARN_ON(!list_empty(&dev_priv->vm_list));

	drm_vblank_cleanup(dev);

	intel_teardown_gmbus(dev);
	intel_teardown_mchbar(dev);

	destroy_workqueue(dev_priv->wq);
	pm_qos_remove_request(&dev_priv->pm_qos);

	dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);

	intel_uncore_fini(dev);
	if (dev_priv->regs != NULL)
		pci_iounmap(dev->pdev, dev_priv->regs);

	if (dev_priv->slab)
		kmem_cache_destroy(dev_priv->slab);

	pci_dev_put(dev_priv->bridge_dev);
	kfree(dev->dev_private);

	return 0;
}

int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	int ret;

	ret = i915_gem_open(dev, file);
	if (ret)
		return ret;

	return 0;
}

/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited.  In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
void i915_driver_lastclose(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* On gen6+ we refuse to init without kms enabled, but then the drm core
	 * goes right around and calls lastclose. Check for this and don't clean
	 * up anything. */
	if (!dev_priv)
		return;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_fbdev_restore_mode(dev);
		vga_switcheroo_process_delayed_switch();
		return;
	}

	i915_gem_lastclose(dev);

	i915_dma_cleanup(dev);
}

void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
	i915_gem_context_close(dev, file_priv);
	i915_gem_release(dev, file_priv);
}

void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	kfree(file_priv);
}

const struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
};

int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
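
/*
 * For reference, userspace reaches these entry points through the generic
 * DRM ioctl path; DRM_IOCTL_DEF_DRV() maps each driver ioctl number to its
 * handler and permission flags (DRM_AUTH, DRM_MASTER, DRM_RENDER_ALLOW, ...).
 * A minimal, illustrative query of a parameter handled by i915_getparam()
 * from a libdrm-style client looks roughly like:
 *
 *	int id;
 *	drm_i915_getparam_t gp = { .param = I915_PARAM_CHIPSET_ID, .value = &id };
 *	drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
 */
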
/*
 * This is really ugly: Because old userspace abused the linux agp interface to
 * manage the gtt, we need to claim that all intel devices are agp.  Otherwise
 * the drm core refuses to initialize the agp support code.
 */
int i915_driver_device_is_agp(struct drm_device * dev)
{
	return 1;
}