/* i915_dma.c -- DMA support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include <linux/pci.h>
#include <linux/vgaarb.h>
#include <linux/acpi.h>
#include <linux/pnp.h>
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <acpi/video.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])

#define BEGIN_LP_RING(n) \
	intel_ring_begin(LP_RING(dev_priv), (n))

#define OUT_RING(x) \
	intel_ring_emit(LP_RING(dev_priv), x)

#define ADVANCE_LP_RING() \
	__intel_ring_advance(LP_RING(dev_priv))
/**
 * Lock test for when it's just for synchronization of ring access.
 *
 * In that case, we don't need to do it when GEM is initialized as nobody else
 * has access to the ring.
 */
#define RING_LOCK_TEST_WITH_RETURN(dev, file) do {			\
	if (LP_RING(dev->dev_private)->obj == NULL)			\
		LOCK_TEST_WITH_RETURN(dev, file);			\
} while (0)
static inline u32
intel_read_legacy_status_page(struct drm_i915_private *dev_priv, int reg)
{
	if (I915_NEED_GFX_HWS(dev_priv->dev))
		return ioread32(dev_priv->dri1.gfx_hws_cpu_addr + reg);
	else
		return intel_read_status_page(LP_RING(dev_priv), reg);
}
#define READ_HWSP(dev_priv, reg) intel_read_legacy_status_page(dev_priv, reg)
#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
#define I915_BREADCRUMB_INDEX		0x21
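/*
 * Descriptive note (added): the "breadcrumb" is a monotonically
 * increasing sequence number that the CPU asks the GPU to write into
 * dword 0x21 of the hardware status page via MI_STORE_DWORD_INDEX
 * (see i915_emit_breadcrumb() and i915_emit_irq() below).
 * READ_BREADCRUMB() reads it back, so comparing it against the last
 * emitted counter tells us how far the ring has actually executed.
 */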
void i915_update_dri1_breadcrumb(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;

	/*
	 * The dri breadcrumb update races against the drm master disappearing.
	 * Instead of trying to fix this (this is by far not the only ums issue)
	 * just don't do the update in kms mode.
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	if (dev->primary->master) {
		master_priv = dev->primary->master->driver_priv;
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch =
				READ_BREADCRUMB(dev_priv);
	}
}
static void i915_write_hws_pga(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 addr;

	addr = dev_priv->status_page_dmah->busaddr;
	if (INTEL_INFO(dev)->gen >= 4)
		addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
	I915_WRITE(HWS_PGA, addr);
}
/**
 * Frees the hardware status page, whether it's a physical address or a virtual
 * address set up by the X Server.
 */
static void i915_free_hws(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	if (dev_priv->status_page_dmah) {
		drm_pci_free(dev, dev_priv->status_page_dmah);
		dev_priv->status_page_dmah = NULL;
	}

	if (ring->status_page.gfx_addr) {
		ring->status_page.gfx_addr = 0;
		iounmap(dev_priv->dri1.gfx_hws_cpu_addr);
	}

	/* Need to rewrite hardware status page */
	I915_WRITE(HWS_PGA, 0x1ffff000);
}
void i915_kernel_lost_context(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	/*
	 * We should never lose context on the ring with modesetting
	 * as we don't expose it to userspace
	 */
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
	ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
	ring->space = ring->head - (ring->tail + I915_RING_FREE_SPACE);
	if (ring->space < 0)
		ring->space += ring->size;

	if (!dev->primary->master)
		return;

	master_priv = dev->primary->master->driver_priv;
	if (ring->head == ring->tail && master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
}
static int i915_dma_cleanup(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (dev->irq_enabled)
		drm_irq_uninstall(dev);

	mutex_lock(&dev->struct_mutex);
	for (i = 0; i < I915_NUM_RINGS; i++)
		intel_cleanup_ring_buffer(&dev_priv->ring[i]);
	mutex_unlock(&dev->struct_mutex);

	/* Clear the HWS virtual address at teardown */
	if (I915_NEED_GFX_HWS(dev))
		i915_free_hws(dev);

	return 0;
}
static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret;

	master_priv->sarea = drm_getsarea(dev);
	if (master_priv->sarea) {
		master_priv->sarea_priv = (drm_i915_sarea_t *)
			((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
	} else {
		DRM_DEBUG_DRIVER("sarea not found assuming DRI2 userspace\n");
	}

	if (init->ring_size != 0) {
		if (LP_RING(dev_priv)->obj != NULL) {
			i915_dma_cleanup(dev);
			DRM_ERROR("Client tried to initialize ringbuffer in "
				  "GEM mode\n");
			return -EINVAL;
		}

		ret = intel_render_ring_init_dri(dev,
						 init->ring_start,
						 init->ring_size);
		if (ret) {
			i915_dma_cleanup(dev);
			return ret;
		}
	}

	dev_priv->dri1.cpp = init->cpp;
	dev_priv->dri1.back_offset = init->back_offset;
	dev_priv->dri1.front_offset = init->front_offset;
	dev_priv->dri1.current_page = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->pf_current_page = 0;

	/* Allow hardware batchbuffers unless told otherwise.
	 */
	dev_priv->dri1.allow_batchbuffer = 1;

	return 0;
}
static int i915_dma_resume(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	DRM_DEBUG_DRIVER("%s\n", __func__);

	if (ring->virtual_start == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	/* Program Hardware Status Page */
	if (!ring->status_page.page_addr) {
		DRM_ERROR("Can not find hardware status page\n");
		return -EINVAL;
	}
	DRM_DEBUG_DRIVER("hw status page @ %p\n",
			 ring->status_page.page_addr);
	if (ring->status_page.gfx_addr != 0)
		intel_ring_setup_status_page(ring);
	else
		i915_write_hws_pga(dev);

	DRM_DEBUG_DRIVER("Enabled hardware status page\n");

	return 0;
}
static int i915_dma_init(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_init_t *init = data;
	int retcode = 0;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	switch (init->func) {
	case I915_INIT_DMA:
		retcode = i915_initialize(dev, init);
		break;
	case I915_CLEANUP_DMA:
		retcode = i915_dma_cleanup(dev);
		break;
	case I915_RESUME_DMA:
		retcode = i915_dma_resume(dev);
		break;
	default:
		retcode = -EINVAL;
		break;
	}

	return retcode;
}
/* Implement basically the same security restrictions as hardware does
 * for MI_BATCH_NON_SECURE.  These can be made stricter at any time.
 *
 * Most of the calculations below involve calculating the size of a
 * particular instruction.  It's important to get the size right as
 * that tells us where the next instruction to check is.  Any illegal
 * instruction detected will be given a size of zero, which is a
 * signal to abort the rest of the buffer.
 */
static int validate_cmd(int cmd)
{
	switch (((cmd >> 29) & 0x7)) {
	case 0x0:
		switch ((cmd >> 23) & 0x3f) {
		case 0x0:
			return 1;	/* MI_NOOP */
		case 0x04:
			return 1;	/* MI_FLUSH */
		default:
			return 0;	/* disallow everything else */
		}
		break;
	case 0x1:
		return 0;	/* reserved */
	case 0x2:
		return (cmd & 0xff) + 2;	/* 2d commands */
	case 0x3:
		if (((cmd >> 24) & 0x1f) <= 0x18)
			return 1;

		switch ((cmd >> 24) & 0x1f) {
		case 0x1c:
			return 1;
		case 0x1d:
			switch ((cmd >> 16) & 0xff) {
			case 0x3:
				return (cmd & 0x1f) + 2;
			case 0x4:
				return (cmd & 0xf) + 2;
			default:
				return (cmd & 0xffff) + 2;
			}
		case 0x1e:
			if (cmd & (1 << 23))
				return (cmd & 0xffff) + 1;
			else
				return 1;
		case 0x1f:
			if ((cmd & (1 << 23)) == 0)	/* inline vertices */
				return (cmd & 0x1ffff) + 2;
			else if (cmd & (1 << 17))	/* indirect random */
				if ((cmd & 0xffff) == 0)
					return 0;	/* unknown length, too hard */
				else
					return (((cmd & 0xffff) + 1) / 2) + 1;
			else
				return 2;	/* indirect sequential */
		default:
			return 0;
		}
	default:
		return 0;
	}

	return 0;
}
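/*
 * Illustrative sketch (added, not part of the original file): how a
 * dword buffer is walked with validate_cmd(), mirroring the checking
 * loop in i915_emit_cmds() below.  A returned size of zero rejects
 * the whole buffer, as the comment above explains.
 */
#if 0
static bool example_cmd_buffer_is_valid(const int *buffer, int dwords)
{
	int i = 0;

	while (i < dwords) {
		int sz = validate_cmd(buffer[i]);

		if (sz == 0 || i + sz > dwords)
			return false;	/* illegal or truncated instruction */
		i += sz;	/* sz tells us where the next instruction starts */
	}
	return true;
}
#endif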
static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i, ret;

	if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
		return -EINVAL;

	for (i = 0; i < dwords;) {
		int sz = validate_cmd(buffer[i]);

		if (sz == 0 || i + sz > dwords)
			return -EINVAL;
		i += sz;
	}

	ret = BEGIN_LP_RING((dwords+1)&~1);
	if (ret)
		return ret;

	for (i = 0; i < dwords; i++)
		OUT_RING(buffer[i]);
	if (dwords & 1)
		OUT_RING(0);

	ADVANCE_LP_RING();

	return 0;
}
int
i915_emit_box(struct drm_device *dev,
	      struct drm_clip_rect *box,
	      int DR1, int DR4)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
	    box->y2 <= 0 || box->x2 <= 0) {
		DRM_ERROR("Bad box %d,%d..%d,%d\n",
			  box->x1, box->y1, box->x2, box->y2);
		return -EINVAL;
	}

	if (INTEL_INFO(dev)->gen >= 4) {
		ret = BEGIN_LP_RING(4);
		if (ret)
			return ret;

		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
		OUT_RING(DR4);
	} else {
		ret = BEGIN_LP_RING(6);
		if (ret)
			return ret;

		OUT_RING(GFX_OP_DRAWRECT_INFO);
		OUT_RING(DR1);
		OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
		OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
		OUT_RING(DR4);
		OUT_RING(0);
	}
	ADVANCE_LP_RING();

	return 0;
}
/* XXX: Emitting the counter should really be moved to part of the IRQ
 * emit. For now, do it in both places:
 */

static void i915_emit_breadcrumb(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	dev_priv->dri1.counter++;
	if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
		dev_priv->dri1.counter = 0;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->dri1.counter);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}
}
static int i915_dispatch_cmdbuffer(struct drm_device * dev,
				   drm_i915_cmdbuffer_t *cmd,
				   struct drm_clip_rect *cliprects,
				   void *cmdbuf)
{
	int nbox = cmd->num_cliprects;
	int i = 0, count, ret;

	if (cmd->sz & 0x3) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, &cliprects[i],
					    cmd->DR1, cmd->DR4);
			if (ret)
				return ret;
		}

		ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
		if (ret)
			return ret;
	}

	i915_emit_breadcrumb(dev);
	return 0;
}
static int i915_dispatch_batchbuffer(struct drm_device * dev,
				     drm_i915_batchbuffer_t * batch,
				     struct drm_clip_rect *cliprects)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int nbox = batch->num_cliprects;
	int i, count, ret;

	if ((batch->start | batch->used) & 0x7) {
		DRM_ERROR("alignment");
		return -EINVAL;
	}

	i915_kernel_lost_context(dev);

	count = nbox ? nbox : 1;
	for (i = 0; i < count; i++) {
		if (i < nbox) {
			ret = i915_emit_box(dev, &cliprects[i],
					    batch->DR1, batch->DR4);
			if (ret)
				return ret;
		}

		if (!IS_I830(dev) && !IS_845G(dev)) {
			ret = BEGIN_LP_RING(2);
			if (ret)
				return ret;

			if (INTEL_INFO(dev)->gen >= 4) {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
				OUT_RING(batch->start);
			} else {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
				OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			}
		} else {
			ret = BEGIN_LP_RING(4);
			if (ret)
				return ret;

			OUT_RING(MI_BATCH_BUFFER);
			OUT_RING(batch->start | MI_BATCH_NON_SECURE);
			OUT_RING(batch->start + batch->used - 4);
			OUT_RING(0);
		}
		ADVANCE_LP_RING();
	}

	if (IS_G4X(dev) || IS_GEN5(dev)) {
		if (BEGIN_LP_RING(2) == 0) {
			OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
			OUT_RING(MI_NOOP);
			ADVANCE_LP_RING();
		}
	}

	i915_emit_breadcrumb(dev);
	return 0;
}
static int i915_dispatch_flip(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv =
		dev->primary->master->driver_priv;
	int ret;

	if (!master_priv->sarea_priv)
		return -EINVAL;

	DRM_DEBUG_DRIVER("%s: page=%d pfCurrentPage=%d\n",
			 __func__,
			 dev_priv->dri1.current_page,
			 master_priv->sarea_priv->pf_current_page);

	i915_kernel_lost_context(dev);

	ret = BEGIN_LP_RING(10);
	if (ret)
		return ret;

	OUT_RING(MI_FLUSH | MI_READ_FLUSH);
	OUT_RING(0);

	OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
	OUT_RING(0);
	if (dev_priv->dri1.current_page == 0) {
		OUT_RING(dev_priv->dri1.back_offset);
		dev_priv->dri1.current_page = 1;
	} else {
		OUT_RING(dev_priv->dri1.front_offset);
		dev_priv->dri1.current_page = 0;
	}
	OUT_RING(0);

	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
	OUT_RING(0);

	ADVANCE_LP_RING();

	master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter++;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->dri1.counter);
		OUT_RING(0);
		ADVANCE_LP_RING();
	}

	master_priv->sarea_priv->pf_current_page = dev_priv->dri1.current_page;
	return 0;
}
static int i915_quiescent(struct drm_device *dev)
{
	i915_kernel_lost_context(dev);
	return intel_ring_idle(LP_RING(dev->dev_private));
}
static int i915_flush_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	ret = i915_quiescent(dev);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
static int i915_batchbuffer(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    master_priv->sarea_priv;
	drm_i915_batchbuffer_t *batch = data;
	int ret;
	struct drm_clip_rect *cliprects = NULL;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!dev_priv->dri1.allow_batchbuffer) {
		DRM_ERROR("Batchbuffer ioctl disabled\n");
		return -EINVAL;
	}

	DRM_DEBUG_DRIVER("i915 batchbuffer, start %x used %d cliprects %d\n",
			 batch->start, batch->used, batch->num_cliprects);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (batch->num_cliprects < 0)
		return -EINVAL;

	if (batch->num_cliprects) {
		cliprects = kcalloc(batch->num_cliprects,
				    sizeof(*cliprects),
				    GFP_KERNEL);
		if (cliprects == NULL)
			return -ENOMEM;

		ret = copy_from_user(cliprects, batch->cliprects,
				     batch->num_cliprects *
				     sizeof(struct drm_clip_rect));
		if (ret != 0) {
			ret = -EFAULT;
			goto fail_free;
		}
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
	mutex_unlock(&dev->struct_mutex);

	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_free:
	kfree(cliprects);

	return ret;
}
static int i915_cmdbuffer(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
	    master_priv->sarea_priv;
	drm_i915_cmdbuffer_t *cmdbuf = data;
	struct drm_clip_rect *cliprects = NULL;
	void *batch_data;
	int ret;

	DRM_DEBUG_DRIVER("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
			 cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (cmdbuf->num_cliprects < 0)
		return -EINVAL;

	batch_data = kmalloc(cmdbuf->sz, GFP_KERNEL);
	if (batch_data == NULL)
		return -ENOMEM;

	ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
	if (ret != 0) {
		ret = -EFAULT;
		goto fail_batch_free;
	}

	if (cmdbuf->num_cliprects) {
		cliprects = kcalloc(cmdbuf->num_cliprects,
				    sizeof(*cliprects), GFP_KERNEL);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto fail_batch_free;
		}

		ret = copy_from_user(cliprects, cmdbuf->cliprects,
				     cmdbuf->num_cliprects *
				     sizeof(struct drm_clip_rect));
		if (ret != 0) {
			ret = -EFAULT;
			goto fail_clip_free;
		}
	}

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
	mutex_unlock(&dev->struct_mutex);
	if (ret) {
		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
		goto fail_clip_free;
	}

	if (sarea_priv)
		sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);

fail_clip_free:
	kfree(cliprects);
fail_batch_free:
	kfree(batch_data);

	return ret;
}
static int i915_emit_irq(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;

	i915_kernel_lost_context(dev);

	DRM_DEBUG_DRIVER("\n");

	dev_priv->dri1.counter++;
	if (dev_priv->dri1.counter > 0x7FFFFFFFUL)
		dev_priv->dri1.counter = 1;
	if (master_priv->sarea_priv)
		master_priv->sarea_priv->last_enqueue = dev_priv->dri1.counter;

	if (BEGIN_LP_RING(4) == 0) {
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(dev_priv->dri1.counter);
		OUT_RING(MI_USER_INTERRUPT);
		ADVANCE_LP_RING();
	}

	return dev_priv->dri1.counter;
}
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
	int ret = 0;
	struct intel_ring_buffer *ring = LP_RING(dev_priv);

	DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
			 READ_BREADCRUMB(dev_priv));

	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
		if (master_priv->sarea_priv)
			master_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
		return 0;
	}

	if (master_priv->sarea_priv)
		master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;

	if (ring->irq_get(ring)) {
		DRM_WAIT_ON(ret, ring->irq_queue, 3 * HZ,
			    READ_BREADCRUMB(dev_priv) >= irq_nr);
		ring->irq_put(ring);
	} else if (wait_for(READ_BREADCRUMB(dev_priv) >= irq_nr, 3000))
		ret = -EBUSY;

	if (ret == -EBUSY) {
		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
			  READ_BREADCRUMB(dev_priv), (int)dev_priv->dri1.counter);
	}

	return ret;
}
/* Needs the lock as it touches the ring.
 */
static int i915_irq_emit(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_emit_t *emit = data;
	int result;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	result = i915_emit_irq(dev);
	mutex_unlock(&dev->struct_mutex);

	if (copy_to_user(emit->irq_seq, &result, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}

	return 0;
}
/* Doesn't need the hardware lock.
 */
static int i915_irq_wait(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_irq_wait_t *irqwait = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	return i915_wait_irq(dev, irqwait->irq_seq);
}
static int i915_vblank_pipe_get(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_vblank_pipe_t *pipe = data;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;

	return 0;
}
/**
 * Schedule buffer swap at given vertical blank.
 */
static int i915_vblank_swap(struct drm_device *dev, void *data,
			    struct drm_file *file_priv)
{
	/* The delayed swap mechanism was fundamentally racy, and has been
	 * removed.  The model was that the client requested a delayed flip/swap
	 * from the kernel, then waited for vblank before continuing to perform
	 * rendering.  The problem was that the kernel might wake the client
	 * up before it dispatched the vblank swap (since the lock has to be
	 * held while touching the ringbuffer), in which case the client would
	 * clear and start the next frame before the swap occurred, and
	 * flicker would occur in addition to likely missing the vblank.
	 *
	 * In the absence of this ioctl, userland falls back to a correct path
	 * of waiting for a vblank, then dispatching the swap on its own.
	 * Context switching to userland and back is plenty fast enough for
	 * meeting the requirements of vblank swapping.
	 */
	return -EINVAL;
}
static int i915_flip_bufs(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	DRM_DEBUG_DRIVER("%s\n", __func__);

	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	ret = i915_dispatch_flip(dev);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
static int i915_getparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_getparam_t *param = data;
	int value;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_PARAM_IRQ_ACTIVE:
		value = dev->pdev->irq ? 1 : 0;
		break;
	case I915_PARAM_ALLOW_BATCHBUFFER:
		value = dev_priv->dri1.allow_batchbuffer ? 1 : 0;
		break;
	case I915_PARAM_LAST_DISPATCH:
		value = READ_BREADCRUMB(dev_priv);
		break;
	case I915_PARAM_CHIPSET_ID:
		value = dev->pdev->device;
		break;
	case I915_PARAM_HAS_GEM:
		value = 1;
		break;
	case I915_PARAM_NUM_FENCES_AVAIL:
		value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
		break;
	case I915_PARAM_HAS_OVERLAY:
		value = dev_priv->overlay ? 1 : 0;
		break;
	case I915_PARAM_HAS_PAGEFLIPPING:
		value = 1;
		break;
	case I915_PARAM_HAS_EXECBUF2:
		/* depends on GEM */
		value = 1;
		break;
	case I915_PARAM_HAS_BSD:
		value = intel_ring_initialized(&dev_priv->ring[VCS]);
		break;
	case I915_PARAM_HAS_BLT:
		value = intel_ring_initialized(&dev_priv->ring[BCS]);
		break;
	case I915_PARAM_HAS_VEBOX:
		value = intel_ring_initialized(&dev_priv->ring[VECS]);
		break;
	case I915_PARAM_HAS_RELAXED_FENCING:
		value = 1;
		break;
	case I915_PARAM_HAS_COHERENT_RINGS:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_CONSTANTS:
		value = INTEL_INFO(dev)->gen >= 4;
		break;
	case I915_PARAM_HAS_RELAXED_DELTA:
		value = 1;
		break;
	case I915_PARAM_HAS_GEN7_SOL_RESET:
		value = 1;
		break;
	case I915_PARAM_HAS_LLC:
		value = HAS_LLC(dev);
		break;
	case I915_PARAM_HAS_WT:
		value = HAS_WT(dev);
		break;
	case I915_PARAM_HAS_ALIASING_PPGTT:
		value = dev_priv->mm.aliasing_ppgtt ? 1 : 0;
		break;
	case I915_PARAM_HAS_WAIT_TIMEOUT:
		value = 1;
		break;
	case I915_PARAM_HAS_SEMAPHORES:
		value = i915_semaphore_is_enabled(dev);
		break;
	case I915_PARAM_HAS_PRIME_VMAP_FLUSH:
		value = 1;
		break;
	case I915_PARAM_HAS_SECURE_BATCHES:
		value = capable(CAP_SYS_ADMIN);
		break;
	case I915_PARAM_HAS_PINNED_BATCHES:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_NO_RELOC:
		value = 1;
		break;
	case I915_PARAM_HAS_EXEC_HANDLE_LUT:
		value = 1;
		break;
	default:
		DRM_DEBUG("Unknown parameter %d\n", param->param);
		return -EINVAL;
	}

	if (copy_to_user(param->value, &value, sizeof(int))) {
		DRM_ERROR("copy_to_user failed\n");
		return -EFAULT;
	}

	return 0;
}
static int i915_setparam(struct drm_device *dev, void *data,
			 struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_setparam_t *param = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	switch (param->param) {
	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
		break;
	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
		break;
	case I915_SETPARAM_ALLOW_BATCHBUFFER:
		dev_priv->dri1.allow_batchbuffer = param->value ? 1 : 0;
		break;
	case I915_SETPARAM_NUM_USED_FENCES:
		if (param->value > dev_priv->num_fence_regs ||
		    param->value < 0)
			return -EINVAL;
		/* Userspace can use first N regs */
		dev_priv->fence_reg_start = param->value;
		break;
	default:
		DRM_DEBUG_DRIVER("unknown parameter %d\n",
				 param->param);
		return -EINVAL;
	}

	return 0;
}
static int i915_set_status_page(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	drm_i915_hws_addr_t *hws = data;
	struct intel_ring_buffer *ring;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -ENODEV;

	if (!I915_NEED_GFX_HWS(dev))
		return -EINVAL;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		WARN(1, "tried to set status page when mode setting active\n");
		return 0;
	}

	DRM_DEBUG_DRIVER("set status page addr 0x%08x\n", (u32)hws->addr);

	ring = LP_RING(dev_priv);
	ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);

	dev_priv->dri1.gfx_hws_cpu_addr =
		ioremap_wc(dev_priv->gtt.mappable_base + hws->addr, 4096);
	if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
		i915_dma_cleanup(dev);
		ring->status_page.gfx_addr = 0;
		DRM_ERROR("can not ioremap virtual address for"
			  " G33 hw status page\n");
		return -ENOMEM;
	}

	memset_io(dev_priv->dri1.gfx_hws_cpu_addr, 0, PAGE_SIZE);
	I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);

	DRM_DEBUG_DRIVER("load hws HWS_PGA with gfx mem 0x%x\n",
			 ring->status_page.gfx_addr);
	DRM_DEBUG_DRIVER("load hws at %p\n",
			 ring->status_page.page_addr);
	return 0;
}
static int i915_get_bridge_dev(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
	if (!dev_priv->bridge_dev) {
		DRM_ERROR("bridge device not found\n");
		return -1;
	}
	return 0;
}
#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)

#define DEVEN_REG 0x54
#define DEVEN_MCHBAR_EN (1 << 28)
/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret;

	if (INTEL_INFO(dev)->gen >= 4)
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;

	/* If ACPI doesn't have it, assume we need to allocate it ourselves */
#ifdef CONFIG_PNP
	if (mchbar_addr &&
	    pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE))
		return 0;
#endif

	/* Get some space for it */
	dev_priv->mch_res.name = "i915 MCHBAR";
	dev_priv->mch_res.flags = IORESOURCE_MEM;
	ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus,
				     &dev_priv->mch_res,
				     MCHBAR_SIZE, MCHBAR_SIZE,
				     PCIBIOS_MIN_MEM,
				     0, pcibios_align_resource,
				     dev_priv->bridge_dev);
	if (ret) {
		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
		dev_priv->mch_res.start = 0;
		return ret;
	}

	if (INTEL_INFO(dev)->gen >= 4)
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

	pci_write_config_dword(dev_priv->bridge_dev, reg,
			       lower_32_bits(dev_priv->mch_res.start));
	return 0;
}
/* Setup MCHBAR if possible; note in mchbar_need_disable whether we
 * should disable it again on teardown */
static void
intel_setup_mchbar(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

	dev_priv->mchbar_need_disable = false;

	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
		enabled = !!(temp & DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		enabled = temp & 1;
	}

	/* If it's already enabled, don't have to do anything */
	if (enabled)
		return;

	if (intel_alloc_mchbar_resource(dev))
		return;

	dev_priv->mchbar_need_disable = true;

	/* Space is allocated or reserved, so enable it. */
	if (IS_I915G(dev) || IS_I915GM(dev)) {
		pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
				       temp | DEVEN_MCHBAR_EN);
	} else {
		pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
		pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
	}
}
static void
intel_teardown_mchbar(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;

	if (dev_priv->mchbar_need_disable) {
		if (IS_I915G(dev) || IS_I915GM(dev)) {
			pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
			temp &= ~DEVEN_MCHBAR_EN;
			pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
		} else {
			pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
			temp &= ~1;
			pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
		}
	}

	if (dev_priv->mch_res.start)
		release_resource(&dev_priv->mch_res);
}
/* true = enable decode, false = disable decode */
static unsigned int i915_vga_set_decode(void *cookie, bool state)
{
	struct drm_device *dev = cookie;

	intel_modeset_vga_set_state(dev, state);
	if (state)
		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
	else
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	pm_message_t pmm = { .event = PM_EVENT_SUSPEND };

	if (state == VGA_SWITCHEROO_ON) {
		pr_info("switched on\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		/* i915 resume handler doesn't set to D0 */
		pci_set_power_state(dev->pdev, PCI_D0);
		i915_resume(dev);
		dev->switch_power_state = DRM_SWITCH_POWER_ON;
	} else {
		pr_err("switched off\n");
		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
		i915_suspend(dev, pmm);
		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
	}
}
static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
{
	struct drm_device *dev = pci_get_drvdata(pdev);
	bool can_switch;

	spin_lock(&dev->count_lock);
	can_switch = (dev->open_count == 0);
	spin_unlock(&dev->count_lock);
	return can_switch;
}
static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
	.set_gpu_state = i915_switcheroo_set_state,
	.reprobe = NULL,
	.can_switch = i915_switcheroo_can_switch,
};
static int i915_load_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_parse_bios(dev);
	if (ret)
		DRM_INFO("failed to find VBIOS tables\n");

	/* If we have > 1 VGA cards, then we need to arbitrate access
	 * to the common VGA resources.
	 *
	 * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA),
	 * then we do not take part in VGA arbitration and the
	 * vga_client_register() fails with -ENODEV.
	 */
	ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
	if (ret && ret != -ENODEV)
		goto out;

	intel_register_dsm_handler();

	ret = vga_switcheroo_register_client(dev->pdev, &i915_switcheroo_ops, false);
	if (ret)
		goto cleanup_vga_client;

	/* Initialise stolen first so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	ret = i915_gem_init_stolen(dev);
	if (ret)
		goto cleanup_vga_switcheroo;

	ret = drm_irq_install(dev);
	if (ret)
		goto cleanup_gem_stolen;

	intel_power_domains_init_hw(dev);

	/* Important: The output setup functions called by modeset_init need
	 * working irqs for e.g. gmbus and dp aux transfers. */
	intel_modeset_init(dev);

	ret = i915_gem_init(dev);
	if (ret)
		goto cleanup_power;

	INIT_WORK(&dev_priv->console_resume_work, intel_console_resume);

	intel_modeset_gem_init(dev);

	/* Always safe in the mode setting case. */
	/* FIXME: do pre/post-mode set stuff in core KMS code */
	dev->vblank_disable_allowed = true;
	if (INTEL_INFO(dev)->num_pipes == 0) {
		intel_display_power_put(dev, POWER_DOMAIN_VGA);
		return 0;
	}

	ret = intel_fbdev_init(dev);
	if (ret)
		goto cleanup_gem;

	/* Only enable hotplug handling once the fbdev is fully set up. */
	intel_hpd_init(dev);

	/*
	 * Some ports require correctly set-up hpd registers for detection to
	 * work properly (leading to ghost connected connector status), e.g. VGA
	 * on gm45.  Hence we can only set up the initial fbdev config after hpd
	 * irqs are fully enabled. Now we should scan for the initial config
	 * only once hotplug handling is enabled, but due to screwed-up locking
	 * around kms/fbdev init we can't protect the fbdev initial config
	 * scanning against hotplug events. Hence do this first and ignore the
	 * tiny window where we will lose hotplug notifications.
	 */
	intel_fbdev_initial_config(dev);

	/* Only enable hotplug handling once the fbdev is fully set up. */
	dev_priv->enable_hotplug_processing = true;

	drm_kms_helper_poll_init(dev);

	return 0;

cleanup_gem:
	mutex_lock(&dev->struct_mutex);
	i915_gem_cleanup_ringbuffer(dev);
	i915_gem_context_fini(dev);
	mutex_unlock(&dev->struct_mutex);
	i915_gem_cleanup_aliasing_ppgtt(dev);
	drm_mm_takedown(&dev_priv->gtt.base.mm);
cleanup_power:
	intel_display_power_put(dev, POWER_DOMAIN_VGA);
	drm_irq_uninstall(dev);
cleanup_gem_stolen:
	i915_gem_cleanup_stolen(dev);
cleanup_vga_switcheroo:
	vga_switcheroo_unregister_client(dev->pdev);
cleanup_vga_client:
	vga_client_register(dev->pdev, NULL, NULL, NULL);
out:
	return ret;
}
int i915_master_create(struct drm_device *dev, struct drm_master *master)
{
	struct drm_i915_master_private *master_priv;

	master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL);
	if (!master_priv)
		return -ENOMEM;

	master->driver_priv = master_priv;
	return 0;
}
void i915_master_destroy(struct drm_device *dev, struct drm_master *master)
{
	struct drm_i915_master_private *master_priv = master->driver_priv;

	if (!master_priv)
		return;

	kfree(master_priv);

	master->driver_priv = NULL;
}
#if IS_ENABLED(CONFIG_FB)
static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
	struct apertures_struct *ap;
	struct pci_dev *pdev = dev_priv->dev->pdev;
	bool primary;

	ap = alloc_apertures(1);
	if (!ap)
		return;

	ap->ranges[0].base = dev_priv->gtt.mappable_base;
	ap->ranges[0].size = dev_priv->gtt.mappable_end;

	primary =
		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;

	remove_conflicting_framebuffers(ap, "inteldrmfb", primary);

	kfree(ap);
}
#else
static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
{
}
#endif
static void i915_dump_device_info(struct drm_i915_private *dev_priv)
{
	const struct intel_device_info *info = dev_priv->info;

#define PRINT_S(name) "%s"
#define SEP_EMPTY
#define PRINT_FLAG(name) info->name ? #name "," : ""
#define SEP_COMMA ,
	DRM_DEBUG_DRIVER("i915 device info: gen=%i, pciid=0x%04x flags="
			 DEV_INFO_FOR_EACH_FLAG(PRINT_S, SEP_EMPTY),
			 info->gen,
			 dev_priv->dev->pdev->device,
			 DEV_INFO_FOR_EACH_FLAG(PRINT_FLAG, SEP_COMMA));
#undef PRINT_S
#undef SEP_EMPTY
#undef PRINT_FLAG
#undef SEP_COMMA
}
/**
 * i915_driver_load - setup chip and create an initial config
 * @dev: DRM device
 * @flags: startup flags
 *
 * The driver load routine has to do several things:
 *   - drive output discovery via intel_modeset_init()
 *   - initialize the memory manager
 *   - allocate initial config memory
 *   - setup the DRM framebuffer with the allocated memory
 */
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
	struct drm_i915_private *dev_priv;
	struct intel_device_info *info;
	int ret = 0, mmio_bar, mmio_size;
	uint32_t aperture_size;

	info = (struct intel_device_info *) flags;
	/* Refuse to load on gen6+ without kms enabled. */
	if (info->gen >= 6 && !drm_core_check_feature(dev, DRIVER_MODESET)) {
		DRM_INFO("Your hardware requires kernel modesetting (KMS)\n");
		DRM_INFO("See CONFIG_DRM_I915_KMS, nomodeset, and i915.modeset parameters\n");
		return -ENODEV;
	}

	/* UMS needs agp support. */
	if (!drm_core_check_feature(dev, DRIVER_MODESET) && !dev->agp)
		return -EINVAL;

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (dev_priv == NULL)
		return -ENOMEM;

	dev->dev_private = (void *)dev_priv;
	dev_priv->dev = dev;
	dev_priv->info = info;

	spin_lock_init(&dev_priv->irq_lock);
	spin_lock_init(&dev_priv->gpu_error.lock);
	spin_lock_init(&dev_priv->backlight_lock);
	spin_lock_init(&dev_priv->uncore.lock);
	spin_lock_init(&dev_priv->mm.object_stat_lock);
	mutex_init(&dev_priv->dpio_lock);
	mutex_init(&dev_priv->modeset_restore_lock);

	intel_pm_setup(dev);

	intel_display_crc_init(dev);

	i915_dump_device_info(dev_priv);

	/* Not all pre-production machines fall into this category, only the
	 * very first ones. Almost everything should work, except for maybe
	 * suspend/resume. And we don't implement workarounds that affect only
	 * pre-production machines. */
	if (IS_HSW_EARLY_SDV(dev))
		DRM_INFO("This is an early pre-production Haswell machine. "
			 "It may not be fully functional.\n");

	if (i915_get_bridge_dev(dev)) {
		ret = -EIO;
		goto free_priv;
	}
	mmio_bar = IS_GEN2(dev) ? 1 : 0;
	/* Before gen4, the registers and the GTT are behind different BARs.
	 * However, from gen4 onwards, the registers and the GTT are shared
	 * in the same BAR, so we want to restrict this ioremap from
	 * clobbering the GTT which we want ioremap_wc instead. Fortunately,
	 * the register BAR remains the same size for all the earlier
	 * generations up to Ironlake.
	 */
	if (info->gen < 5)
		mmio_size = 512*1024;
	else
		mmio_size = 2*1024*1024;

	dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, mmio_size);
	if (!dev_priv->regs) {
		DRM_ERROR("failed to map registers\n");
		ret = -EIO;
		goto put_bridge;
	}
);
1550 /* This must be called before any calls to HAS_PCH_* */
1551 intel_detect_pch(dev
);
1553 intel_uncore_init(dev
);
1555 ret
= i915_gem_gtt_init(dev
);
1559 if (drm_core_check_feature(dev
, DRIVER_MODESET
))
1560 i915_kick_out_firmware_fb(dev_priv
);
1562 pci_set_master(dev
->pdev
);
1564 /* overlay on gen2 is broken and can't address above 1G */
1566 dma_set_coherent_mask(&dev
->pdev
->dev
, DMA_BIT_MASK(30));
1568 /* 965GM sometimes incorrectly writes to hardware status page (HWS)
1569 * using 32bit addressing, overwriting memory if HWS is located
1572 * The documentation also mentions an issue with undefined
1573 * behaviour if any general state is accessed within a page above 4GB,
1574 * which also needs to be handled carefully.
1576 if (IS_BROADWATER(dev
) || IS_CRESTLINE(dev
))
1577 dma_set_coherent_mask(&dev
->pdev
->dev
, DMA_BIT_MASK(32));
1579 aperture_size
= dev_priv
->gtt
.mappable_end
;
1581 dev_priv
->gtt
.mappable
=
1582 io_mapping_create_wc(dev_priv
->gtt
.mappable_base
,
1584 if (dev_priv
->gtt
.mappable
== NULL
) {
1589 dev_priv
->gtt
.mtrr
= arch_phys_wc_add(dev_priv
->gtt
.mappable_base
,
	/* The i915 workqueue is primarily used for batched retirement of
	 * requests (and thus managing bo) once the task has been completed
	 * by the GPU. i915_gem_retire_requests() is called directly when we
	 * need high-priority retirement, such as waiting for an explicit
	 * bo.
	 *
	 * It is also used for periodic low-priority events, such as
	 * idle-timers and recording error state.
	 *
	 * All tasks on the workqueue are expected to acquire the dev mutex
	 * so there is no point in running more than one instance of the
	 * workqueue at any time.  Use an ordered one.
	 */
	dev_priv->wq = alloc_ordered_workqueue("i915", 0);
	if (dev_priv->wq == NULL) {
		DRM_ERROR("Failed to create our workqueue.\n");
		ret = -ENOMEM;
		goto out_mtrrfree;
	}
	intel_irq_init(dev);
	intel_uncore_sanitize(dev);

	/* Try to make sure MCHBAR is enabled before poking at it */
	intel_setup_mchbar(dev);
	intel_setup_gmbus(dev);
	intel_opregion_setup(dev);

	intel_setup_bios(dev);

	i915_gem_load(dev);

	/* On the 945G/GM, the chipset reports the MSI capability on the
	 * integrated graphics even though the support isn't actually there
	 * according to the published specs.  It doesn't appear to function
	 * correctly in testing on 945G.
	 * This may be a side effect of MSI having been made available for PEG
	 * and the registers being closely associated.
	 *
	 * According to chipset errata, on the 965GM, MSI interrupts may
	 * be lost or delayed, but we use them anyways to avoid
	 * stuck interrupts on some machines.
	 */
	if (!IS_I945G(dev) && !IS_I945GM(dev))
		pci_enable_msi(dev->pdev);
	dev_priv->num_plane = 1;
	if (IS_VALLEYVIEW(dev))
		dev_priv->num_plane = 2;

	if (INTEL_INFO(dev)->num_pipes) {
		ret = drm_vblank_init(dev, INTEL_INFO(dev)->num_pipes);
		if (ret)
			goto out_gem_unload;
	}

	intel_power_domains_init(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = i915_load_modeset_init(dev);
		if (ret < 0) {
			DRM_ERROR("failed to init modeset\n");
			goto out_power_well;
		}
	} else {
		/* Start out suspended in ums mode. */
		dev_priv->ums.mm_suspended = 1;
	}

	i915_setup_sysfs(dev);

	if (INTEL_INFO(dev)->num_pipes) {
		/* Must be done after probing outputs */
		intel_opregion_init(dev);
		acpi_video_register();
	}

	if (IS_GEN5(dev))
		intel_gpu_ips_init(dev_priv);

	intel_init_runtime_pm(dev_priv);

	return 0;
out_power_well:
	intel_power_domains_remove(dev);
	drm_vblank_cleanup(dev);
out_gem_unload:
	if (dev_priv->mm.inactive_shrinker.scan_objects)
		unregister_shrinker(&dev_priv->mm.inactive_shrinker);

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	intel_teardown_gmbus(dev);
	intel_teardown_mchbar(dev);
	destroy_workqueue(dev_priv->wq);
out_mtrrfree:
	arch_phys_wc_del(dev_priv->gtt.mtrr);
	io_mapping_free(dev_priv->gtt.mappable);
out_gtt:
	list_del(&dev_priv->gtt.base.global_link);
	drm_mm_takedown(&dev_priv->gtt.base.mm);
	dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
out_regs:
	intel_uncore_fini(dev);
	pci_iounmap(dev->pdev, dev_priv->regs);
put_bridge:
	pci_dev_put(dev_priv->bridge_dev);
free_priv:
	if (dev_priv->slab)
		kmem_cache_destroy(dev_priv->slab);
	kfree(dev_priv);
	return ret;
}
int i915_driver_unload(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = i915_gem_suspend(dev);
	if (ret) {
		DRM_ERROR("failed to idle hardware: %d\n", ret);
		return ret;
	}

	intel_fini_runtime_pm(dev_priv);

	intel_gpu_ips_teardown();

	/* The i915.ko module is still not prepared to be loaded when
	 * the power well is not enabled, so just enable it in case
	 * we're going to unload/reload. */
	intel_display_set_init_power(dev, true);
	intel_power_domains_remove(dev);

	i915_teardown_sysfs(dev);

	if (dev_priv->mm.inactive_shrinker.scan_objects)
		unregister_shrinker(&dev_priv->mm.inactive_shrinker);

	io_mapping_free(dev_priv->gtt.mappable);
	arch_phys_wc_del(dev_priv->gtt.mtrr);

	acpi_video_unregister();

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_fbdev_fini(dev);
		intel_modeset_cleanup(dev);
		cancel_work_sync(&dev_priv->console_resume_work);

		/*
		 * free the memory space allocated for the child device
		 * config parsed from VBT
		 */
		if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
			kfree(dev_priv->vbt.child_dev);
			dev_priv->vbt.child_dev = NULL;
			dev_priv->vbt.child_dev_num = 0;
		}

		vga_switcheroo_unregister_client(dev->pdev);
		vga_client_register(dev->pdev, NULL, NULL, NULL);
	}
	/* Free error state after interrupts are fully disabled. */
	del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
	cancel_work_sync(&dev_priv->gpu_error.work);
	i915_destroy_error_state(dev);

	cancel_delayed_work_sync(&dev_priv->pc8.enable_work);

	if (dev->pdev->msi_enabled)
		pci_disable_msi(dev->pdev);

	intel_opregion_fini(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Flush any outstanding unpin_work. */
		flush_workqueue(dev_priv->wq);

		mutex_lock(&dev->struct_mutex);
		i915_gem_free_all_phys_object(dev);
		i915_gem_cleanup_ringbuffer(dev);
		i915_gem_context_fini(dev);
		mutex_unlock(&dev->struct_mutex);
		i915_gem_cleanup_aliasing_ppgtt(dev);
		i915_gem_cleanup_stolen(dev);

		if (!I915_NEED_GFX_HWS(dev))
			i915_free_hws(dev);
	}

	list_del(&dev_priv->gtt.base.global_link);
	WARN_ON(!list_empty(&dev_priv->vm_list));

	drm_vblank_cleanup(dev);

	intel_teardown_gmbus(dev);
	intel_teardown_mchbar(dev);

	destroy_workqueue(dev_priv->wq);
	pm_qos_remove_request(&dev_priv->pm_qos);

	dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);

	intel_uncore_fini(dev);
	if (dev_priv->regs != NULL)
		pci_iounmap(dev->pdev, dev_priv->regs);

	if (dev_priv->slab)
		kmem_cache_destroy(dev_priv->slab);

	pci_dev_put(dev_priv->bridge_dev);
	kfree(dev->dev_private);

	return 0;
}
int i915_driver_open(struct drm_device *dev, struct drm_file *file)
{
	int ret;

	ret = i915_gem_open(dev, file);
	if (ret)
		return ret;

	return 0;
}
/**
 * i915_driver_lastclose - clean up after all DRM clients have exited
 * @dev: DRM device
 *
 * Take care of cleaning up after all DRM clients have exited.  In the
 * mode setting case, we want to restore the kernel's initial mode (just
 * in case the last client left us in a bad state).
 *
 * Additionally, in the non-mode setting case, we'll tear down the GTT
 * and DMA structures, since the kernel won't be using them, and clean
 * up any GEM state.
 */
void i915_driver_lastclose(struct drm_device * dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* On gen6+ we refuse to init without kms enabled, but then the drm core
	 * goes right around and calls lastclose. Check for this and don't clean
	 * up anything. */
	if (!dev_priv)
		return;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		intel_fbdev_restore_mode(dev);
		vga_switcheroo_process_delayed_switch();
		return;
	}

	i915_gem_lastclose(dev);

	i915_dma_cleanup(dev);
}
void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
	mutex_lock(&dev->struct_mutex);
	i915_gem_context_close(dev, file_priv);
	i915_gem_release(dev, file_priv);
	mutex_unlock(&dev->struct_mutex);
}
void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;

	kfree(file_priv);
}
const struct drm_ioctl_desc i915_ioctls[] = {
	DRM_IOCTL_DEF_DRV(I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FLIP, i915_flip_bufs, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, i915_batchbuffer, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, i915_irq_emit, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, i915_irq_wait, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_SETPARAM, i915_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_INIT_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_CMDBUFFER, i915_cmdbuffer, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_DESTROY_HEAP, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_SET_VBLANK_PIPE, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_CACHING, i915_gem_set_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_CACHING, i915_gem_get_caching_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_CREATE, i915_gem_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PREAD, i915_gem_pread_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_PWRITE, i915_gem_pwrite_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP, i915_gem_mmap_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_MMAP_GTT, i915_gem_mmap_gtt_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, intel_sprite_get_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
	DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_DESTROY, i915_gem_context_destroy_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_REG_READ, i915_reg_read_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(I915_GET_RESET_STATS, i915_get_reset_stats_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
};
int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
/*
 * This is really ugly: Because old userspace abused the linux agp interface to
 * manage the gtt, we need to claim that all intel devices are agp.  For
 * otherwise the drm core refuses to initialize the agp support code.
 */
int i915_driver_device_is_agp(struct drm_device * dev)
{
	return 1;
}