/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <uapi/drm/i915_drm.h>

#include "intel_bios.h"
#include "intel_ringbuffer.h"
#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <drm/intel-gtt.h>
#include <linux/backlight.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
#include <linux/pm_qos.h>
/* General customization:
 */

#define DRIVER_AUTHOR	"Tungsten Graphics, Inc."

#define DRIVER_NAME	"i915"
#define DRIVER_DESC	"Intel Graphics"
#define DRIVER_DATE	"20080730"
#define pipe_name(p) ((p) + 'A')

#define transcoder_name(t) ((t) + 'A')

#define plane_name(p) ((p) + 'A')

#define sprite_name(p, s) ((p) * dev_priv->num_plane + (s) + 'A')

#define port_name(p) ((p) + 'A')
enum intel_display_power_domain {
	POWER_DOMAIN_PIPE_A,
	POWER_DOMAIN_PIPE_B,
	POWER_DOMAIN_PIPE_C,
	POWER_DOMAIN_PIPE_A_PANEL_FITTER,
	POWER_DOMAIN_PIPE_B_PANEL_FITTER,
	POWER_DOMAIN_PIPE_C_PANEL_FITTER,
	POWER_DOMAIN_TRANSCODER_A,
	POWER_DOMAIN_TRANSCODER_B,
	POWER_DOMAIN_TRANSCODER_C,
	POWER_DOMAIN_TRANSCODER_EDP = POWER_DOMAIN_TRANSCODER_A + 0xF,
};
#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
		((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
#define POWER_DOMAIN_TRANSCODER(tran) ((tran) + POWER_DOMAIN_TRANSCODER_A)
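/*
 * Worked example (editor's illustration, not in the original header): the
 * power-domain enum above is laid out so these macros translate by plain
 * offset arithmetic, e.g. POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_B) yields
 * POWER_DOMAIN_PIPE_B_PANEL_FITTER, relying on the per-pipe domains being
 * declared in pipe order. The "+ 0xF" for POWER_DOMAIN_TRANSCODER_EDP
 * keeps the eDP transcoder well clear of the range POWER_DOMAIN_TRANSCODER()
 * can produce for the ordinary transcoders.
 */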
enum hpd_pin {
	HPD_NONE = 0,
	HPD_PORT_A = HPD_NONE, /* PORT_A is internal */
	HPD_TV = HPD_NONE, /* TV is known to be unreliable */
};
#define I915_GEM_GPU_DOMAINS \
	(I915_GEM_DOMAIN_RENDER | \
	 I915_GEM_DOMAIN_SAMPLER | \
	 I915_GEM_DOMAIN_COMMAND | \
	 I915_GEM_DOMAIN_INSTRUCTION | \
	 I915_GEM_DOMAIN_VERTEX)
#define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++)

#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
	list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
		if ((intel_encoder)->base.crtc == (__crtc))
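/*
 * Illustrative usage sketch (editor's addition): both iterators expand to
 * open-coded loops, so they nest and break like ordinary for/if statements.
 * A "dev" pointer must be in scope for for_each_pipe, since the macro body
 * references INTEL_INFO(dev) directly:
 *
 *	struct intel_encoder *encoder;
 *	int pipe;
 *
 *	for_each_pipe(pipe)
 *		DRM_DEBUG_KMS("pipe %c present\n", pipe_name(pipe));
 *
 *	for_each_encoder_on_crtc(dev, crtc, encoder)
 *		DRM_DEBUG_KMS("encoder attached to this crtc\n");
 */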
struct drm_i915_private;
enum intel_dpll_id {
	DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
	/* real shared dpll ids must be >= 0 */
	DPLL_ID_PCH_PLL_A,
	DPLL_ID_PCH_PLL_B,
};
#define I915_NUM_PLLS 2
struct intel_dpll_hw_state {
	uint32_t dpll;
	uint32_t fp0;
	uint32_t fp1;
};
struct intel_shared_dpll {
	int refcount; /* count of number of CRTCs sharing this PLL */
	int active; /* count of number of active CRTCs (i.e. DPMS on) */
	bool on; /* is the PLL actually active? Disabled during modeset */
	/* should match the index in the dev_priv->shared_dplls array */
	enum intel_dpll_id id;
	struct intel_dpll_hw_state hw_state;
	void (*mode_set)(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll);
	void (*enable)(struct drm_i915_private *dev_priv,
		       struct intel_shared_dpll *pll);
	void (*disable)(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll);
	bool (*get_hw_state)(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll,
			     struct intel_dpll_hw_state *hw_state);
};
/* Used by dp and fdi links */
struct intel_link_m_n {
	uint32_t tu;
	uint32_t gmch_m;
	uint32_t gmch_n;
	uint32_t link_m;
	uint32_t link_n;
};
void intel_link_compute_m_n(int bpp, int nlanes,
			    int pixel_clock, int link_clock,
			    struct intel_link_m_n *m_n);
struct intel_ddi_plls {
	int spll_refcount;
	int wrpll1_refcount;
	int wrpll2_refcount;
};
/* Interface history:
 *
 * 1.1: Original.
 * 1.2: Add Power Management
 * 1.3: Add vblank support
 * 1.4: Fix cmdbuffer path, add heap destroy
 * 1.5: Add vblank pipe configuration
 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
 *      - Support vertical blank on secondary display pipe
 */
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		6
#define DRIVER_PATCHLEVEL	0
#define WATCH_LISTS	0

#define I915_GEM_PHYS_CURSOR_0 1
#define I915_GEM_PHYS_CURSOR_1 2
#define I915_GEM_PHYS_OVERLAY_REGS 3
#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)
struct drm_i915_gem_phys_object {
	int id;
	struct page **page_list;
	drm_dma_handle_t *handle;
	struct drm_i915_gem_object *cur_obj;
};
struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
struct opregion_asle;
struct intel_opregion {
	struct opregion_header __iomem *header;
	struct opregion_acpi __iomem *acpi;
	struct opregion_swsci __iomem *swsci;
	struct opregion_asle __iomem *asle;
	u32 __iomem *lid_state;
};
#define OPREGION_SIZE (8*1024)
struct intel_overlay;
struct intel_overlay_error_state;
struct drm_i915_master_private {
	drm_local_map_t *sarea;
	struct _drm_i915_sarea *sarea_priv;
};
#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 6
struct drm_i915_fence_reg {
	struct list_head lru_list;
	struct drm_i915_gem_object *obj;
	int pin_count;
};
struct sdvo_device_mapping {
	u8 initialized;
	u8 dvo_port;
	u8 slave_addr;
	u8 dvo_wiring;
	u8 i2c_pin;
	u8 ddc_pin;
};
struct intel_display_error_state;
struct drm_i915_error_state {
	bool waiting[I915_NUM_RINGS];
	u32 pipestat[I915_MAX_PIPES];
	u32 tail[I915_NUM_RINGS];
	u32 head[I915_NUM_RINGS];
	u32 ctl[I915_NUM_RINGS];
	u32 ipeir[I915_NUM_RINGS];
	u32 ipehr[I915_NUM_RINGS];
	u32 instdone[I915_NUM_RINGS];
	u32 acthd[I915_NUM_RINGS];
	u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
	u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1];
	u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */
	/* our own tracking of ring head and tail */
	u32 cpu_ring_head[I915_NUM_RINGS];
	u32 cpu_ring_tail[I915_NUM_RINGS];
	u32 error; /* gen6+ */
	u32 err_int; /* gen7 */
	u32 instpm[I915_NUM_RINGS];
	u32 instps[I915_NUM_RINGS];
	u32 extra_instdone[I915_NUM_INSTDONE_REG];
	u32 seqno[I915_NUM_RINGS];
	u32 fault_reg[I915_NUM_RINGS];
	u32 faddr[I915_NUM_RINGS];
	u64 fence[I915_MAX_NUM_FENCES];
	struct drm_i915_error_ring {
		struct drm_i915_error_object {
			int page_count;
			u32 gtt_offset;
			u32 *pages[0];
		} *ringbuffer, *batchbuffer, *ctx;
		struct drm_i915_error_request {
			long jiffies;
			u32 seqno;
			u32 tail;
		} *requests;
		int num_requests;
	} ring[I915_NUM_RINGS];
	struct drm_i915_error_buffer {
		u32 size;
		u32 name;
		u32 rseqno, wseqno;
		u32 gtt_offset;
		u32 read_domains;
		u32 write_domain;
		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
		s32 pinned:2;
		u32 tiling:2;
		u32 dirty:1;
		u32 purgeable:1;
		s32 ring:4;
		u32 cache_level:3;
	} **active_bo, **pinned_bo;
	u32 *active_bo_count, *pinned_bo_count;
	struct intel_overlay_error_state *overlay;
	struct intel_display_error_state *display;
};
struct intel_crtc_config;
struct drm_i915_display_funcs {
	bool (*fbc_enabled)(struct drm_device *dev);
	void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
	void (*disable_fbc)(struct drm_device *dev);
	int (*get_display_clock_speed)(struct drm_device *dev);
	int (*get_fifo_size)(struct drm_device *dev, int plane);
	/**
	 * find_dpll() - Find the best values for the PLL
	 * @limit: limits for the PLL
	 * @crtc: current CRTC
	 * @target: target frequency in kHz
	 * @refclk: reference clock frequency in kHz
	 * @match_clock: if provided, @best_clock P divider must
	 *               match the P divider from @match_clock
	 *               used for LVDS downclocking
	 * @best_clock: best PLL values found
	 *
	 * Returns true on success, false on failure.
	 */
	bool (*find_dpll)(const struct intel_limit *limit,
			  struct drm_crtc *crtc,
			  int target, int refclk,
			  struct dpll *match_clock,
			  struct dpll *best_clock);
	void (*update_wm)(struct drm_device *dev);
	void (*update_sprite_wm)(struct drm_plane *plane,
				 struct drm_crtc *crtc,
				 uint32_t sprite_width, int pixel_size,
				 bool enable, bool scaled);
	void (*modeset_global_resources)(struct drm_device *dev);
	/* Returns the active state of the crtc, and if the crtc is active,
	 * fills out the pipe-config with the hw state. */
	bool (*get_pipe_config)(struct intel_crtc *,
				struct intel_crtc_config *);
	void (*get_clock)(struct intel_crtc *, struct intel_crtc_config *);
	int (*crtc_mode_set)(struct drm_crtc *crtc,
			     int x, int y,
			     struct drm_framebuffer *old_fb);
	void (*crtc_enable)(struct drm_crtc *crtc);
	void (*crtc_disable)(struct drm_crtc *crtc);
	void (*off)(struct drm_crtc *crtc);
	void (*write_eld)(struct drm_connector *connector,
			  struct drm_crtc *crtc);
	void (*fdi_link_train)(struct drm_crtc *crtc);
	void (*init_clock_gating)(struct drm_device *dev);
	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
			  struct drm_framebuffer *fb,
			  struct drm_i915_gem_object *obj,
			  uint32_t flags);
	int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			    int x, int y);
	void (*hpd_irq_setup)(struct drm_device *dev);
	/* clock updates for mode set */
	/* render clock increase/decrease */
	/* display clock increase/decrease */
	/* pll clock increase/decrease */
};
struct intel_uncore_funcs {
	void (*force_wake_get)(struct drm_i915_private *dev_priv);
	void (*force_wake_put)(struct drm_i915_private *dev_priv);
};
struct intel_uncore {
	spinlock_t lock; /** lock is also taken in irq contexts. */

	struct intel_uncore_funcs funcs;

	unsigned forcewake_count;
};
#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
	func(is_mobile) sep \
	func(is_i945gm) sep \
	func(need_gfx_hws) sep \
	func(is_pineview) sep \
	func(is_broadwater) sep \
	func(is_crestline) sep \
	func(is_ivybridge) sep \
	func(is_valleyview) sep \
	func(is_haswell) sep \
	func(has_force_wake) sep \
	func(has_pipe_cxsr) sep \
	func(has_hotplug) sep \
	func(cursor_needs_physical) sep \
	func(has_overlay) sep \
	func(overlay_needs_physical) sep \
	func(supports_tv) sep \
	func(has_bsd_ring) sep \
	func(has_blt_ring) sep \
	func(has_vebox_ring)

#define DEFINE_FLAG(name) u8 name:1
#define SEP_SEMICOLON ;
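/*
 * Expansion sketch (editor's illustration): with the (func, sep) pair
 * above, DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON) expands inside
 * struct intel_device_info below to a list of one-bit fields:
 *
 *	u8 is_mobile:1;
 *	u8 is_i945gm:1;
 *	...
 *
 * Keeping the flag list in a single x-macro means other (func, sep) pairs
 * can reuse it, e.g. to stringify every flag name for debugfs output.
 */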
struct intel_device_info {
	u32 display_mmio_offset;
	u8 num_pipes:3;
	u8 gen;
	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
};
enum i915_cache_level {
	I915_CACHE_NONE = 0,
	I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
	I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specific
			      caches, eg sampler/render caches, and the
			      large Last-Level-Cache. LLC is coherent with
			      the CPU, but L3 is only visible to the GPU. */
	I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
};

typedef uint32_t gen6_gtt_pte_t;
struct i915_address_space {
	struct drm_mm mm;
	struct drm_device *dev;
	struct list_head global_link;
	unsigned long start; /* Start offset always 0 for dri2 */
	size_t total; /* size addr space maps (ex. 2GB for ggtt) */

	/**
	 * List of objects currently involved in rendering.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * LRU list of objects which are not in the ringbuffer and
	 * are ready to unbind, but are still in the GTT.
	 *
	 * last_rendering_seqno is 0 while an object is in this list.
	 *
	 * A reference is not held on the buffer while on this list,
	 * as merely being GTT-bound shouldn't prevent its being
	 * freed, and we'll pull it off the list in the free path.
	 */
	struct list_head inactive_list;

	/* FIXME: Need a more generic return type */
	gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
				     enum i915_cache_level level);
	void (*clear_range)(struct i915_address_space *vm,
			    unsigned int first_entry,
			    unsigned int num_entries);
	void (*insert_entries)(struct i915_address_space *vm,
			       struct sg_table *st,
			       unsigned int first_entry,
			       enum i915_cache_level cache_level);
	void (*cleanup)(struct i915_address_space *vm);
};
/* The Graphics Translation Table is the way in which GEN hardware translates a
 * Graphics Virtual Address into a Physical Address. In addition to the normal
 * collateral associated with any va->pa translations GEN hardware also has a
 * portion of the GTT which can be mapped by the CPU and remain both coherent
 * and correct (in cases like swizzling). That region is referred to as GMADR in
 * the spec.
 */
struct i915_gtt {
	struct i915_address_space base;
	size_t stolen_size; /* Total size of stolen memory */

	unsigned long mappable_end; /* End offset that we can CPU map */
	struct io_mapping *mappable; /* Mapping to our CPU mappable region */
	phys_addr_t mappable_base; /* PA of our GMADR */

	/** "Graphics Stolen Memory" holds the global PTEs */
	void __iomem *gsm;

	int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
			 size_t *stolen, phys_addr_t *mappable_base,
			 unsigned long *mappable_end);
};
#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
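/*
 * Worked example (editor's illustration): gtt_total_entries() turns the
 * size of the address space into a PTE count. With a 2GB GGTT and 4KiB
 * pages, 0x80000000 >> PAGE_SHIFT gives 524288 global PTEs.
 */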
struct i915_hw_ppgtt {
	struct i915_address_space base;
	unsigned num_pd_entries;
	struct page **pt_pages;
	dma_addr_t *pt_dma_addr;

	int (*enable)(struct drm_device *dev);
};
/**
 * A VMA represents a GEM BO that is bound into an address space. Therefore, a
 * VMA's presence cannot be guaranteed before binding, or after unbinding the
 * object into/from the address space.
 *
 * To make things as simple as possible (ie. no refcounting), a VMA's lifetime
 * will always be <= an object's lifetime. So object refcounting should cover us.
 */
struct i915_vma {
	struct drm_mm_node node;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm;

	/** This object's place on the active/inactive lists */
	struct list_head mm_list;

	struct list_head vma_link; /* Link in the object's VMA list */

	/** This vma's place in the batchbuffer or on the eviction list */
	struct list_head exec_list;
};
struct i915_ctx_hang_stats {
	/* This context had batch pending when hang was declared */
	unsigned batch_pending;

	/* This context had batch active when hang was declared */
	unsigned batch_active;
};
/* This must match up with the value previously used for execbuf2.rsvd1. */
#define DEFAULT_CONTEXT_ID 0
struct i915_hw_context {
	struct kref ref;
	int id;
	bool is_initialized;
	struct drm_i915_file_private *file_priv;
	struct intel_ring_buffer *ring;
	struct drm_i915_gem_object *obj;
	struct i915_ctx_hang_stats hang_stats;
};
struct i915_fbc {
	struct drm_mm_node *compressed_fb;
	struct drm_mm_node *compressed_llb;

	struct intel_fbc_work {
		struct delayed_work work;
		struct drm_crtc *crtc;
		struct drm_framebuffer *fb;
		int interval;
	} *fbc_work;

	enum no_fbc_reason {
		FBC_OK, /* FBC is enabled */
		FBC_UNSUPPORTED, /* FBC is not supported by this chipset */
		FBC_NO_OUTPUT, /* no outputs enabled to compress */
		FBC_STOLEN_TOO_SMALL, /* not enough space for buffers */
		FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
		FBC_MODE_TOO_LARGE, /* mode too large for compression */
		FBC_BAD_PLANE, /* fbc not supported on plane */
		FBC_NOT_TILED, /* buffer not tiled */
		FBC_MULTIPLE_PIPES, /* more than one pipe active */
		FBC_MODULE_PARAM,
		FBC_CHIP_DEFAULT, /* disabled by default on this chip */
	} no_fbc_reason;
};
enum no_psr_reason {
	PSR_NO_SOURCE, /* Not supported on platform */
	PSR_NO_SINK, /* Not supported by panel */
	PSR_PWR_WELL_ENABLED,
	PSR_INTERLACED_ENABLED,
};
enum intel_pch {
	PCH_NONE = 0,	/* No PCH present */
	PCH_IBX,	/* Ibexpeak PCH */
	PCH_CPT,	/* Cougarpoint PCH */
	PCH_LPT,	/* Lynxpoint PCH */
	PCH_NOP,
};
enum intel_sbi_destination {
	SBI_ICLK,
	SBI_MPHY,
};
#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_NO_PCH_PWM_ENABLE (1<<3)
struct intel_fbdev;
struct intel_fbc_work;
struct intel_gmbus {
	struct i2c_adapter adapter;
	u32 force_bit;
	u32 reg0;
	u32 gpio_reg;
	struct i2c_algo_bit_data bit_algo;
	struct drm_i915_private *dev_priv;
};
struct i915_suspend_saved_registers {
	u32 saveTRANS_HTOTAL_A;
	u32 saveTRANS_HBLANK_A;
	u32 saveTRANS_HSYNC_A;
	u32 saveTRANS_VTOTAL_A;
	u32 saveTRANS_VBLANK_A;
	u32 saveTRANS_VSYNC_A;
	u32 savePFIT_PGM_RATIOS;
	u32 saveBLC_HIST_CTL;
	u32 saveBLC_PWM_CTL2;
	u32 saveBLC_CPU_PWM_CTL;
	u32 saveBLC_CPU_PWM_CTL2;
	u32 saveTRANS_HTOTAL_B;
	u32 saveTRANS_HBLANK_B;
	u32 saveTRANS_HSYNC_B;
	u32 saveTRANS_VTOTAL_B;
	u32 saveTRANS_VBLANK_B;
	u32 saveTRANS_VSYNC_B;
	u32 savePP_ON_DELAYS;
	u32 savePP_OFF_DELAYS;
	u32 savePFIT_CONTROL;
	u32 save_palette_a[256];
	u32 save_palette_b[256];
	u32 saveDPFC_CB_BASE;
	u32 saveFBC_CFB_BASE;
	u32 saveFBC_CONTROL2;
	u32 saveCACHE_MODE_0;
	u32 saveMI_ARB_STATE;
	uint64_t saveFENCE[I915_MAX_NUM_FENCES];
	u32 savePIPEA_GMCH_DATA_M;
	u32 savePIPEB_GMCH_DATA_M;
	u32 savePIPEA_GMCH_DATA_N;
	u32 savePIPEB_GMCH_DATA_N;
	u32 savePIPEA_DP_LINK_M;
	u32 savePIPEB_DP_LINK_M;
	u32 savePIPEA_DP_LINK_N;
	u32 savePIPEB_DP_LINK_N;
	u32 savePCH_DREF_CONTROL;
	u32 saveDISP_ARB_CTL;
	u32 savePIPEA_DATA_M1;
	u32 savePIPEA_DATA_N1;
	u32 savePIPEA_LINK_M1;
	u32 savePIPEA_LINK_N1;
	u32 savePIPEB_DATA_M1;
	u32 savePIPEB_DATA_N1;
	u32 savePIPEB_LINK_M1;
	u32 savePIPEB_LINK_N1;
	u32 saveMCHBAR_RENDER_STANDBY;
	u32 savePCH_PORT_HOTPLUG;
};
struct intel_gen6_power_mgmt {
	/* work and pm_iir are protected by dev_priv->irq_lock */
	struct work_struct work;
	u32 pm_iir;

	/* On vlv we need to manually drop to Vmin with a delayed work. */
	struct delayed_work vlv_work;

	/* The below variables and all the rps hw state are protected by
	 * dev->struct_mutex. */
	struct delayed_work delayed_resume_work;

	/*
	 * Protects RPS/RC6 register access and PCU communication.
	 * Must be taken after struct_mutex if nested.
	 */
	struct mutex hw_lock;
};
/* defined in intel_pm.c */
extern spinlock_t mchdev_lock;
struct intel_ilk_power_mgmt {
	unsigned long last_time1;
	unsigned long chipset_power;
	struct timespec last_time2;
	unsigned long gfx_power;

	struct drm_i915_gem_object *pwrctx;
	struct drm_i915_gem_object *renderctx;
};
/* Power well structure for haswell */
struct i915_power_well {
	struct drm_device *device;
	/* power well enable/disable usage count */
	int count;
};
struct i915_dri1_state {
	unsigned allow_batchbuffer : 1;
	u32 __iomem *gfx_hws_cpu_addr;
};
struct i915_ums_state {
	/**
	 * Flag if the X Server, and thus DRM, is not currently in
	 * control of the device.
	 *
	 * This is set between LeaveVT and EnterVT. It needs to be
	 * replaced with a semaphore. It also needs to be
	 * transitioned away from for kernel modesetting.
	 */
	int mm_suspended;
};
struct intel_l3_parity {
	u32 *remap_info;
	struct work_struct error_work;
};
struct i915_gem_mm {
	/** Memory allocator for GTT stolen memory */
	struct drm_mm stolen;
	/** List of all objects in gtt_space. Used to restore gtt
	 * mappings on resume */
	struct list_head bound_list;
	/**
	 * List of objects which are not bound to the GTT (thus
	 * are idle and not used by the GPU) but still have
	 * (presumably uncached) pages still attached.
	 */
	struct list_head unbound_list;

	/** Usable portion of the GTT for GEM */
	unsigned long stolen_base; /* limited to low memory (32-bit) */

	/** PPGTT used for aliasing the PPGTT with the GTT */
	struct i915_hw_ppgtt *aliasing_ppgtt;

	struct shrinker inactive_shrinker;
	bool shrinker_no_lock_stealing;

	/** LRU list of objects with fence regs on them. */
	struct list_head fence_list;

	/**
	 * We leave the user IRQ off as much as possible,
	 * but this means that requests will finish and never
	 * be retired once the system goes idle. Set a timer to
	 * fire periodically while the ring is running. When it
	 * fires, go retire requests.
	 */
	struct delayed_work retire_work;

	/**
	 * Are we in a non-interruptible section of code like
	 * modesetting?
	 */
	bool interruptible;

	/** Bit 6 swizzling required for X tiling */
	uint32_t bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
	uint32_t bit_6_swizzle_y;

	/* storage for physical objects */
	struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];

	/* accounting, useful for userland debugging */
	spinlock_t object_stat_lock;
	size_t object_memory;
};
struct drm_i915_error_state_buf {
	unsigned bytes;
	unsigned size;
	int err;
	u8 *buf;
	loff_t start;
	loff_t pos;
};
struct i915_error_state_file_priv {
	struct drm_device *dev;
	struct drm_i915_error_state *error;
};
struct i915_gpu_error {
	/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
	struct timer_list hangcheck_timer;

	/* For reset and error_state handling. */
	spinlock_t lock;
	/* Protected by the above dev->gpu_error.lock. */
	struct drm_i915_error_state *first_error;
	struct work_struct work;

	unsigned long last_reset;

	/**
	 * State variable and reset counter controlling the reset flow
	 *
	 * Upper bits are for the reset counter. This counter is used by the
	 * wait_seqno code to notice, race-free, that a reset event happened
	 * and that it needs to restart the entire ioctl (since most likely
	 * the seqno it waited for won't ever signal anytime soon).
	 *
	 * This is important for lock-free wait paths, where no contended lock
	 * naturally enforces the correct ordering between the bail-out of the
	 * waiter and the gpu reset work code.
	 *
	 * Lowest bit controls the reset state machine: Set means a reset is in
	 * progress. This state will (presuming we don't have any bugs) decay
	 * into either unset (successful reset) or the special WEDGED value (hw
	 * terminally sour). All waiters on the reset_queue will be woken when
	 * that happens.
	 */
	atomic_t reset_counter;

	/**
	 * Special values/flags for reset_counter
	 *
	 * Note that the code relies on
	 *	I915_WEDGED & I915_RESET_IN_PROGRESS_FLAG
	 * being true.
	 */
#define I915_RESET_IN_PROGRESS_FLAG 1
#define I915_WEDGED 0xffffffff

	/**
	 * Waitqueue to signal when the reset has completed. Used by clients
	 * that wait for dev_priv->mm.wedged to settle.
	 */
	wait_queue_head_t reset_queue;

	/* For gpu hang simulation. */
	unsigned int stop_rings;
};
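/*
 * Illustrative sketch (editor's addition) of the lock-free wait pattern the
 * reset_counter comment above describes: sample the counter before
 * sleeping, and afterwards treat either a set in-progress bit or a changed
 * counter as "a reset happened, restart the ioctl":
 *
 *	unsigned reset = atomic_read(&error->reset_counter);
 *	ret = wait_for_my_seqno(...);	(hypothetical wait helper)
 *	if ((reset & I915_RESET_IN_PROGRESS_FLAG) ||
 *	    reset != atomic_read(&error->reset_counter))
 *		return -EAGAIN;
 */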
enum modeset_restore {
	MODESET_ON_LID_OPEN,
	MODESET_DONE,
	MODESET_SUSPENDED,
};
struct intel_vbt_data {
	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */

	unsigned int int_tv_support:1;
	unsigned int lvds_dither:1;
	unsigned int lvds_vbt:1;
	unsigned int int_crt_support:1;
	unsigned int lvds_use_ssc:1;
	unsigned int display_clock_mode:1;
	unsigned int fdi_rx_polarity_inverted:1;

	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */

	int edp_preemphasis;
	bool edp_initialized;
	struct edp_power_seq edp_pps;

	struct child_device_config *child_dev;
};
enum intel_ddb_partitioning {
	INTEL_DDB_PART_1_2,
	INTEL_DDB_PART_5_6, /* IVB+ */
};
struct intel_wm_level {
	bool enable;
	uint32_t pri_val;
	uint32_t spr_val;
	uint32_t cur_val;
	uint32_t fbc_val;
};
/*
 * This struct tracks the state needed for the Package C8+ feature.
 *
 * Package states C8 and deeper are really deep PC states that can only be
 * reached when all the devices on the system allow it, so even if the graphics
 * device allows PC8+, it doesn't mean the system will actually get to these
 * states.
 *
 * Our driver only allows PC8+ when all the outputs are disabled, the power well
 * is disabled and the GPU is idle. When these conditions are met, we manually
 * do the other conditions: disable the interrupts, clocks and switch LCPLL
 * refclk to Fclk.
 *
 * When we really reach PC8 or deeper states (not just when we allow it) we lose
 * the state of some registers, so when we come back from PC8+ we need to
 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
 * need to take care of the registers kept by RC6.
 *
 * The interrupt disabling is part of the requirements. We can only leave the
 * PCH HPD interrupts enabled. If we're in PC8+ and we get another interrupt we
 * can lock the machine.
 *
 * Ideally every piece of our code that needs PC8+ disabled would call
 * hsw_disable_package_c8, which would increment disable_count and prevent the
 * system from reaching PC8+. But we don't have a symmetric way to do this for
 * everything, so we have the requirements_met and gpu_idle variables. When we
 * switch requirements_met or gpu_idle to true we decrease disable_count, and
 * increase it in the opposite case. The requirements_met variable is true when
 * all the CRTCs, encoders and the power well are disabled. The gpu_idle
 * variable is true when the GPU is idle.
 *
 * In addition to everything, we only actually enable PC8+ if disable_count
 * stays at zero for at least some seconds. This is implemented with the
 * enable_work variable. We do this so we don't enable/disable PC8 dozens of
 * consecutive times when all screens are disabled and some background app
 * queries the state of our connectors, or we have some application constantly
 * waking up to use the GPU. Only after the enable_work function actually
 * enables PC8+ does the "enable" variable become true, which means that it can
 * be false even if disable_count is 0.
 *
 * The irqs_disabled variable becomes true exactly after we disable the IRQs and
 * goes back to false exactly before we reenable the IRQs. We use this variable
 * to check if someone is trying to enable/disable IRQs while they're supposed
 * to be disabled. This shouldn't happen and we'll print some error messages in
 * case it happens, but if it actually happens we'll also update the variables
 * inside struct regsave so when we restore the IRQs they will contain the
 * latest expected values.
 *
 * For more, read "Display Sequences for Package C8" on our documentation.
 */
struct i915_package_c8 {
	bool requirements_met;
	bool gpu_idle;
	bool irqs_disabled;
	/* Only true after the delayed work task actually enables it. */
	bool enabled;
	int disable_count;
	struct mutex lock;
	struct delayed_work enable_work;

	struct {
		uint32_t deimr;
		uint32_t sdeimr;
		uint32_t gtimr;
		uint32_t gtier;
		uint32_t gen6_pmimr;
	} regsave;
};
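/*
 * Illustrative sketch (editor's addition): per the comment above, code that
 * must keep the hardware out of PC8+ brackets its register access with the
 * hsw_*_package_c8 pair so disable_count stays balanced:
 *
 *	hsw_disable_package_c8(dev_priv);
 *	... touch state that PC8+ would lose ...
 *	hsw_enable_package_c8(dev_priv);
 */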
typedef struct drm_i915_private {
	struct drm_device *dev;
	struct kmem_cache *slab;

	const struct intel_device_info *info;

	int relative_constants_mode;

	struct intel_uncore uncore;

	struct intel_gmbus gmbus[GMBUS_NUM_PORTS];

	/** gmbus_mutex protects against concurrent usage of the single hw gmbus
	 * controller on different i2c buses. */
	struct mutex gmbus_mutex;

	/**
	 * Base address of the gmbus and gpio block.
	 */
	uint32_t gpio_mmio_base;

	wait_queue_head_t gmbus_wait_queue;

	struct pci_dev *bridge_dev;
	struct intel_ring_buffer ring[I915_NUM_RINGS];
	uint32_t last_seqno, next_seqno;

	drm_dma_handle_t *status_page_dmah;
	struct resource mch_res;

	atomic_t irq_received;

	/* protects the irq masks */
	spinlock_t irq_lock;

	/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
	struct pm_qos_request pm_qos;

	/* DPIO indirect register protection */
	struct mutex dpio_lock;

	/** Cached value of IMR to avoid reads in updating the bitfield */
	u32 irq_mask;

	struct work_struct hotplug_work;
	bool enable_hotplug_processing;
	struct {
		unsigned long hpd_last_jiffies;
		int hpd_cnt;
		enum {
			HPD_ENABLED = 0,
			HPD_DISABLED = 1,
			HPD_MARK_DISABLED = 2
		} hpd_mark;
	} hpd_stats[HPD_NUM_PINS];
	struct timer_list hotplug_reenable_timer;

	int num_plane;

	struct i915_fbc fbc;
	struct intel_opregion opregion;
	struct intel_vbt_data vbt;

	/* overlay */
	struct intel_overlay *overlay;
	unsigned int sprite_scaling_enabled;

	/* backlight */
	struct {
		int level;
		bool enabled;
		spinlock_t lock; /* bl registers and the above bl fields */
		struct backlight_device *device;
	} backlight;

	/* LVDS info */
	bool no_aux_handshake;

	struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
	int num_fence_regs; /* 8 on pre-965, 16 otherwise */

	unsigned int fsb_freq, mem_freq, is_ddr3;

	struct workqueue_struct *wq;

	/* Display functions */
	struct drm_i915_display_funcs display;

	/* PCH chipset type */
	enum intel_pch pch_type;
	unsigned short pch_id;

	unsigned long quirks;

	enum modeset_restore modeset_restore;
	struct mutex modeset_restore_lock;

	struct list_head vm_list; /* Global list of all address spaces */
	struct i915_gtt gtt; /* VMA representing the global address space */

	struct i915_gem_mm mm;

	/* Kernel Modesetting */

	struct sdvo_device_mapping sdvo_mappings[2];

	struct drm_crtc *plane_to_crtc_mapping[3];
	struct drm_crtc *pipe_to_crtc_mapping[3];
	wait_queue_head_t pending_flip_queue;

	int num_shared_dpll;
	struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
	struct intel_ddi_plls ddi_plls;

	/* Reclocking support */
	bool render_reclock_avail;
	bool lvds_downclock_avail;
	/* indicates the reduced downclock for LVDS */
	int lvds_downclock;

	bool mchbar_need_disable;

	struct intel_l3_parity l3_parity;

	/* Cannot be determined by PCIID. You must always read a register. */
	size_t ellc_size;

	/* gen6+ rps state */
	struct intel_gen6_power_mgmt rps;

	/* ilk-only ips/rps state. Everything in here is protected by the global
	 * mchdev_lock in intel_pm.c */
	struct intel_ilk_power_mgmt ips;

	/* Haswell power well */
	struct i915_power_well power_well;

	enum no_psr_reason no_psr_reason;

	struct i915_gpu_error gpu_error;

	struct drm_i915_gem_object *vlv_pctx;

	/* fbdev registered on this device */
	struct intel_fbdev *fbdev;

	/*
	 * The console may be contended at resume, but we don't
	 * want to block on it.
	 */
	struct work_struct console_resume_work;

	struct drm_property *broadcast_rgb_property;
	struct drm_property *force_audio_property;

	bool hw_contexts_disabled;
	uint32_t hw_context_size;

	struct i915_suspend_saved_registers regfile;

	/*
	 * Raw watermark latency values:
	 * in 0.1us units for WM0,
	 * in 0.5us units for WM1+.
	 */
	uint16_t pri_latency[5];
	uint16_t spr_latency[5];
	uint16_t cur_latency[5];

	struct i915_package_c8 pc8;

	/* Old dri1 support infrastructure, beware the dragons ya fools entering
	 * here! */
	struct i915_dri1_state dri1;
	/* Old ums support infrastructure, same warning applies. */
	struct i915_ums_state ums;
} drm_i915_private_t;
static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
{
	return dev->dev_private;
}
/* Iterate over initialised rings */
#define for_each_ring(ring__, dev_priv__, i__) \
	for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
		if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))
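/*
 * Illustrative usage sketch (editor's addition): the loop condition folds
 * in intel_ring_initialized(), so rings that don't exist on this platform
 * are skipped transparently:
 *
 *	struct intel_ring_buffer *ring;
 *	int i;
 *
 *	for_each_ring(ring, dev_priv, i)
 *		i915_gem_retire_requests_ring(ring);
 */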
enum hdmi_force_audio {
	HDMI_AUDIO_OFF_DVI = -2,	/* no aux data for HDMI-DVI converter */
	HDMI_AUDIO_OFF,			/* force turn off HDMI audio */
	HDMI_AUDIO_AUTO,		/* trust EDID */
	HDMI_AUDIO_ON,			/* force turn on HDMI audio */
};
#define I915_GTT_OFFSET_NONE ((u32)-1)
struct drm_i915_gem_object_ops {
	/* Interface between the GEM object and its backing storage.
	 * get_pages() is called once, prior to binding the associated set
	 * of pages into the GTT, and put_pages() is called after we no
	 * longer need them. As we expect there to be associated cost with
	 * migrating pages between the backing storage and making them
	 * available for the GPU (e.g. clflush), we may hold onto the pages
	 * after they are no longer referenced by the GPU in case they may
	 * be used again shortly (for example migrating the pages to a
	 * different memory domain within the GTT). put_pages() will
	 * therefore most likely be called when the object itself is being
	 * released or under memory pressure (where we attempt to reap pages
	 * for the shrinker).
	 */
	int (*get_pages)(struct drm_i915_gem_object *);
	void (*put_pages)(struct drm_i915_gem_object *);
};
struct drm_i915_gem_object {
	struct drm_gem_object base;

	const struct drm_i915_gem_object_ops *ops;

	/** List of VMAs backed by this object */
	struct list_head vma_list;

	/** Stolen memory for this object, instead of being backed by shmem. */
	struct drm_mm_node *stolen;
	struct list_head global_list;

	struct list_head ring_list;
	/** Used in execbuf to temporarily hold a ref */
	struct list_head obj_exec_link;
	/** This object's place in the batchbuffer or on the eviction list */
	struct list_head exec_list;

	/**
	 * This is set if the object is on the active lists (has pending
	 * rendering and so a non-zero seqno), and is not set if it is on
	 * the inactive (ready to be unbound) list.
	 */
	unsigned int active:1;

	/**
	 * This is set if the object has been written to since last bound
	 * to the GTT.
	 */
	unsigned int dirty:1;

	/**
	 * Fence register bits (if any) for this object. Will be set
	 * as needed when mapped into the GTT.
	 * Protected by dev->struct_mutex.
	 */
	signed int fence_reg:I915_MAX_NUM_FENCE_BITS;

	/**
	 * Advice: are the backing pages purgeable?
	 */
	unsigned int madv:2;

	/**
	 * Current tiling mode for the object.
	 */
	unsigned int tiling_mode:2;
	/**
	 * Whether the tiling parameters for the currently associated fence
	 * register have changed. Note that for the purposes of tracking
	 * tiling changes we also treat the unfenced register, the register
	 * slot that the object occupies whilst it executes a fenced
	 * command (such as BLT on gen2/3), as a "fence".
	 */
	unsigned int fence_dirty:1;

	/** How many users have pinned this object in GTT space. The following
	 * users can each hold at most one reference: pwrite/pread, pin_ioctl
	 * (via user_pin_count), execbuffer (objects are not allowed multiple
	 * times for the same batchbuffer), and the framebuffer code. When
	 * switching/pageflipping, the framebuffer code has at most two buffers
	 * pinned per crtc.
	 *
	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
	 * bits with absolutely no headroom. So use 4 bits. */
	unsigned int pin_count:4;
#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf

	/**
	 * Is the object at the current location in the gtt mappable and
	 * fenceable? Used to avoid costly recalculations.
	 */
	unsigned int map_and_fenceable:1;

	/**
	 * Whether the current gtt mapping needs to be mappable (and isn't just
	 * mappable by accident). Track pin and fault separate for a more
	 * accurate mappable working set.
	 */
	unsigned int fault_mappable:1;
	unsigned int pin_mappable:1;
	unsigned int pin_display:1;

	/*
	 * Is the GPU currently using a fence to access this buffer?
	 */
	unsigned int pending_fenced_gpu_access:1;
	unsigned int fenced_gpu_access:1;

	unsigned int cache_level:3;

	unsigned int has_aliasing_ppgtt_mapping:1;
	unsigned int has_global_gtt_mapping:1;
	unsigned int has_dma_mapping:1;

	struct sg_table *pages;
	int pages_pin_count;

	/* prime dma-buf support */
	void *dma_buf_vmapping;
	int vmapping_count;

	/**
	 * Used for performing relocations during execbuffer insertion.
	 */
	struct hlist_node exec_node;
	unsigned long exec_handle;
	struct drm_i915_gem_exec_object2 *exec_entry;

	struct intel_ring_buffer *ring;

	/** Breadcrumb of last rendering to the buffer. */
	uint32_t last_read_seqno;
	uint32_t last_write_seqno;
	/** Breadcrumb of last fenced GPU access to the buffer. */
	uint32_t last_fenced_seqno;

	/** Current tiling stride for the object, if it's tiled. */
	uint32_t stride;

	/** Record of address bit 17 of each page at last unbind. */
	unsigned long *bit_17;

	/** User space pin count and filp owning the pin */
	uint32_t user_pin_count;
	struct drm_file *pin_filp;

	/** for phy allocated objects */
	struct drm_i915_gem_phys_object *phys_obj;
};
#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)

#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
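/*
 * Illustrative note (editor's addition): code handed a core GEM object
 * recovers the driver-private wrapper with to_intel_bo(). Because "base"
 * is embedded rather than pointed to, container_of() is a compile-time
 * offset, not a lookup:
 *
 *	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
 */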
/**
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable
 * sequence-number comparisons on buffer last_rendering_seqnos, and associate
 * an emission time with seqnos for tracking how far ahead of the GPU we are.
 */
struct drm_i915_gem_request {
	/** On which ring this request was generated */
	struct intel_ring_buffer *ring;

	/** GEM sequence number associated with this request. */
	uint32_t seqno;

	/** Position in the ringbuffer of the start of the request */
	u32 head;

	/** Position in the ringbuffer of the end of the request */
	u32 tail;

	/** Context related to this request */
	struct i915_hw_context *ctx;

	/** Batch buffer related to this request if any */
	struct drm_i915_gem_object *batch_obj;

	/** Time at which this request was emitted, in jiffies. */
	unsigned long emitted_jiffies;

	/** global list entry for this request */
	struct list_head list;

	struct drm_i915_file_private *file_priv;
	/** file_priv list entry for this request */
	struct list_head client_list;
};
struct drm_i915_file_private {
	struct {
		spinlock_t lock;
		struct list_head request_list;
	} mm;
	struct idr context_idr;

	struct i915_ctx_hang_stats hang_stats;
};
#define INTEL_INFO(dev) (to_i915(dev)->info)

#define IS_I830(dev) ((dev)->pci_device == 0x3577)
#define IS_845G(dev) ((dev)->pci_device == 0x2562)
#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
#define IS_IVB_GT1(dev) ((dev)->pci_device == 0x0156 || \
			 (dev)->pci_device == 0x0152 || \
			 (dev)->pci_device == 0x015a)
#define IS_SNB_GT1(dev) ((dev)->pci_device == 0x0102 || \
			 (dev)->pci_device == 0x0106 || \
			 (dev)->pci_device == 0x010A)
#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
			       ((dev)->pci_device & 0xFF00) == 0x0C00)
#define IS_ULT(dev) (IS_HASWELL(dev) && \
		     ((dev)->pci_device & 0xFF00) == 0x0A00)
/*
 * The genX designation typically refers to the render engine, so render
 * capability related checks should use IS_GEN, while display and other checks
 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
 * chips, etc.).
 */
#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2)
#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3)
#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4)
#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
#define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7)
#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring)
#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring)
#define HAS_VEBOX(dev) (INTEL_INFO(dev)->has_vebox_ring)
#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
#define HAS_WT(dev) (IS_HASWELL(dev) && to_i915(dev)->ellc_size)
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)

#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev))

#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)

/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev))

/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
 * rows, which changed the alignment requirements and fence programming.
 */
#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
						      IS_I915GM(dev)))
#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)

#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)

#define HAS_IPS(dev) (IS_ULT(dev))

#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
#define HAS_POWER_WELL(dev) (IS_HASWELL(dev))
#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)

#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00

#define INTEL_PCH_TYPE(dev) (to_i915(dev)->pch_type)
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)

#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)

#define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))

#define GT_FREQUENCY_MULTIPLIER 50
#include "i915_trace.h"
/*
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage. This
 * stage is entered automatically when the GPU is idle when RC6 support is
 * enabled, and as soon as a new workload arises the GPU wakes up
 * automatically as well.
 *
 * There are different RC6 modes available in Intel GPU, which differentiate
 * among each other with the latency required to enter and leave RC6 and
 * voltage consumed by the GPU in different states.
 *
 * The combination of the following flags define which states GPU is allowed
 * to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6, and
 * RC6pp is the deepest RC6. Their support by hardware varies according to the
 * GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one
 * which brings the most power savings; deeper states save more power, but
 * require higher latency to switch to and wake up.
 */
#define INTEL_RC6_ENABLE (1<<0)
#define INTEL_RC6p_ENABLE (1<<1)
#define INTEL_RC6pp_ENABLE (1<<2)
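/*
 * Illustrative note (editor's addition): these are independent bits, so a
 * policy asking for RC6 and deep RC6 but not RC6pp would be expressed as
 * (INTEL_RC6_ENABLE | INTEL_RC6p_ENABLE), i.e. the value 3.
 */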
extern const struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
extern unsigned int i915_fbpercrtc __always_unused;
extern int i915_panel_ignore_lid __read_mostly;
extern unsigned int i915_powersave __read_mostly;
extern int i915_semaphores __read_mostly;
extern unsigned int i915_lvds_downclock __read_mostly;
extern int i915_lvds_channel_mode __read_mostly;
extern int i915_panel_use_ssc __read_mostly;
extern int i915_vbt_sdvo_panel_type __read_mostly;
extern int i915_enable_rc6 __read_mostly;
extern int i915_enable_fbc __read_mostly;
extern bool i915_enable_hangcheck __read_mostly;
extern int i915_enable_ppgtt __read_mostly;
extern int i915_enable_psr __read_mostly;
extern unsigned int i915_preliminary_hw_support __read_mostly;
extern int i915_disable_power_well __read_mostly;
extern int i915_enable_ips __read_mostly;
extern bool i915_fastboot __read_mostly;
extern int i915_enable_pc8 __read_mostly;
extern int i915_pc8_timeout __read_mostly;
extern bool i915_prefault_disable __read_mostly;
extern int i915_suspend(struct drm_device *dev, pm_message_t state);
extern int i915_resume(struct drm_device *dev);
extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);

/* i915_dma.c */
void i915_update_dri1_breadcrumb(struct drm_device *dev);
extern void i915_kernel_lost_context(struct drm_device *dev);
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *);
extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
extern void i915_driver_lastclose(struct drm_device *dev);
extern void i915_driver_preclose(struct drm_device *dev,
				 struct drm_file *file_priv);
extern void i915_driver_postclose(struct drm_device *dev,
				  struct drm_file *file_priv);
extern int i915_driver_device_is_agp(struct drm_device *dev);
#ifdef CONFIG_COMPAT
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg);
#endif
extern int i915_emit_box(struct drm_device *dev,
			 struct drm_clip_rect *box,
			 int DR1, int DR4);
extern int intel_gpu_reset(struct drm_device *dev);
extern int i915_reset(struct drm_device *dev);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);

extern void intel_console_resume(struct work_struct *work);
/* i915_irq.c */
void i915_queue_hangcheck(struct drm_device *dev);
void i915_handle_error(struct drm_device *dev, bool wedged);
extern void intel_irq_init(struct drm_device *dev);
extern void intel_pm_init(struct drm_device *dev);
extern void intel_hpd_init(struct drm_device *dev);

extern void intel_uncore_sanitize(struct drm_device *dev);
extern void intel_uncore_early_sanitize(struct drm_device *dev);
extern void intel_uncore_init(struct drm_device *dev);
extern void intel_uncore_clear_errors(struct drm_device *dev);
extern void intel_uncore_check_errors(struct drm_device *dev);
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
int i915_gem_init_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_create_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);
int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);
int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int i915_gem_execbuffer(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_execbuffer2(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv);
int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file);
int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file);
int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int i915_gem_set_tiling(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_get_tiling(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
void *i915_gem_object_alloc(struct drm_device *dev);
void i915_gem_object_free(struct drm_i915_gem_object *obj);
int i915_gem_init_object(struct drm_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
						  size_t size);
void i915_gem_free_object(struct drm_gem_object *obj);
struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
				     struct i915_address_space *vm);
void i915_gem_vma_destroy(struct i915_vma *vma);
int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
				     struct i915_address_space *vm,
				     uint32_t alignment,
				     bool map_and_fenceable,
				     bool nonblocking);
void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
int __must_check i915_vma_unbind(struct i915_vma *vma);
int __must_check i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj);
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);

int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
{
	struct sg_page_iter sg_iter;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, n)
		return sg_page_iter_page(&sg_iter);

	return NULL;
}
static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	BUG_ON(obj->pages == NULL);
	obj->pages_pin_count++;
}
static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	BUG_ON(obj->pages_pin_count == 0);
	obj->pages_pin_count--;
}
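/*
 * Illustrative usage sketch (editor's addition): callers that dereference
 * obj->pages pin them first so the shrinker cannot reap the backing store
 * mid-use, then drop the pin when done:
 *
 *	ret = i915_gem_object_get_pages(obj);
 *	if (ret)
 *		return ret;
 *	i915_gem_object_pin_pages(obj);
 *	... read or write through obj->pages ...
 *	i915_gem_object_unpin_pages(obj);
 */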
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
			 struct intel_ring_buffer *to);
void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
				    struct intel_ring_buffer *ring);
int i915_gem_dumb_create(struct drm_file *file_priv,
			 struct drm_device *dev,
			 struct drm_mode_create_dumb *args);
int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
		      uint32_t handle, uint64_t *offset);
/**
 * Returns true if seq1 is later than seq2.
 */
static inline bool
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}
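/*
 * Worked example (editor's addition): seqnos are u32 and wrap. With
 * seq1 == 3 and seq2 == 0xfffffffd the subtraction wraps to 6, which is
 * >= 0 as an int32_t, so seq1 is correctly treated as later even across
 * the wrap; a naive "seq1 >= seq2" comparison would get this case wrong.
 */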
int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
static inline bool
i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
{
	if (obj->fence_reg != I915_FENCE_REG_NONE) {
		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
		dev_priv->fence_regs[obj->fence_reg].pin_count++;
		return true;
	} else
		return false;
}

static inline void
i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
{
	if (obj->fence_reg != I915_FENCE_REG_NONE) {
		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
		WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
		dev_priv->fence_regs[obj->fence_reg].pin_count--;
	}
}
void i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
				      bool interruptible);
static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
{
	return unlikely(atomic_read(&error->reset_counter)
			& I915_RESET_IN_PROGRESS_FLAG);
}
static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
{
	return atomic_read(&error->reset_counter) == I915_WEDGED;
}
void i915_gem_reset(struct drm_device *dev);
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
int __must_check i915_gem_init(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
void i915_gem_l3_remap(struct drm_device *dev);
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_idle(struct drm_device *dev);
int __i915_add_request(struct intel_ring_buffer *ring,
		       struct drm_file *file,
		       struct drm_i915_gem_object *batch_obj,
		       u32 *seqno);
#define i915_add_request(ring, seqno) \
	__i915_add_request(ring, NULL, NULL, seqno)
int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
				 uint32_t seqno);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
				  bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     struct intel_ring_buffer *pipelined);
void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
int i915_gem_attach_phys_object(struct drm_device *dev,
				struct drm_i915_gem_object *obj,
				int id,
				int align);
void i915_gem_detach_phys_object(struct drm_device *dev,
				 struct drm_i915_gem_object *obj);
void i915_gem_free_all_phys_object(struct drm_device *dev);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);
uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode);
uint32_t
i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
			   int tiling_mode, bool fenced);

int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
				    enum i915_cache_level cache_level);
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf);

struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				      struct drm_gem_object *gem_obj, int flags);

void i915_gem_restore_fences(struct drm_device *dev);
unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
				  struct i915_address_space *vm);
bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o);
bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
			struct i915_address_space *vm);
unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
				struct i915_address_space *vm);
struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
				     struct i915_address_space *vm);
struct i915_vma *
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
				  struct i915_address_space *vm);
/* Some GGTT VM helpers */
#define obj_to_ggtt(obj) \
	(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
static inline bool i915_is_ggtt(struct i915_address_space *vm)
{
	struct i915_address_space *ggtt =
		&((struct drm_i915_private *)(vm)->dev->dev_private)->gtt.base;
	return vm == ggtt;
}

static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_bound(obj, obj_to_ggtt(obj));
}

static inline unsigned long
i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_offset(obj, obj_to_ggtt(obj));
}

static inline unsigned long
i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_size(obj, obj_to_ggtt(obj));
}

static inline int __must_check
i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
		      uint32_t alignment,
		      bool map_and_fenceable,
		      bool nonblocking)
{
	return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment,
				   map_and_fenceable, nonblocking);
}
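/*
 * Illustrative sketch (not a declaration from this header): the helpers
 * above just forward to the address-space-aware functions with the global
 * GTT as the VM, e.g. pinning a scanout buffer into the GGTT and reading
 * back its offset (the 4096-byte alignment is an assumed example value):
 *
 *	ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
 *	if (ret == 0)
 *		offset = i915_gem_obj_ggtt_offset(obj);
 */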
/* i915_gem_context.c */
void i915_gem_context_init(struct drm_device *dev);
void i915_gem_context_fini(struct drm_device *dev);
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
int i915_switch_context(struct intel_ring_buffer *ring,
			struct drm_file *file, int to_id);
void i915_gem_context_free(struct kref *ctx_ref);
static inline void i915_gem_context_reference(struct i915_hw_context *ctx)
{
	kref_get(&ctx->ref);
}

static inline void i915_gem_context_unreference(struct i915_hw_context *ctx)
{
	kref_put(&ctx->ref, i915_gem_context_free);
}
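/*
 * Usage note (illustrative, not part of the original header): context
 * lifetime is kref-based, so any code that stashes a context pointer is
 * expected to hold a reference for as long as the pointer may be used:
 *
 *	i915_gem_context_reference(ctx);	// keep ctx alive
 *	...
 *	i915_gem_context_unreference(ctx);	// last put calls i915_gem_context_free
 */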
struct i915_ctx_hang_stats * __must_check
i915_gem_context_get_hang_stats(struct drm_device *dev,
				struct drm_file *file,
				u32 id);
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file);
/* i915_gem_gtt.c */
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
			    struct drm_i915_gem_object *obj,
			    enum i915_cache_level cache_level);
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
			      struct drm_i915_gem_object *obj);

void i915_gem_restore_gtt_mappings(struct drm_device *dev);
int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
			      enum i915_cache_level cache_level);
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
void i915_gem_init_global_gtt(struct drm_device *dev);
void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
			       unsigned long mappable_end, unsigned long end);
int i915_gem_gtt_init(struct drm_device *dev);
static inline void i915_gem_chipset_flush(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen < 6)
		intel_gtt_chipset_flush();
}
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev,
					  struct i915_address_space *vm,
					  int min_size,
					  unsigned alignment,
					  unsigned cache_level,
					  bool mappable,
					  bool nonblock);
int i915_gem_evict_everything(struct drm_device *dev);
/* i915_gem_stolen.c */
int i915_gem_init_stolen(struct drm_device *dev);
int i915_gem_stolen_setup_compression(struct drm_device *dev, int size);
void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
void i915_gem_cleanup_stolen(struct drm_device *dev);
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
					       u32 stolen_offset,
					       u32 gtt_offset,
					       u32 size);
void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);
/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		obj->tiling_mode != I915_TILING_NONE;
}

void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
/* i915_gem_debug.c */
#if WATCH_LISTS
int i915_verify_lists(struct drm_device *dev);
#else
#define i915_verify_lists(dev) 0
#endif
/* i915_debugfs.c */
int i915_debugfs_init(struct drm_minor *minor);
void i915_debugfs_cleanup(struct drm_minor *minor);
/* i915_gpu_error.c */
__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
			    const struct i915_error_state_file_priv *error);
int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
			      size_t count, loff_t pos);
static inline void i915_error_state_buf_release(
	struct drm_i915_error_state_buf *eb)
{
	kfree(eb->buf);
}
void i915_capture_error_state(struct drm_device *dev);
void i915_error_state_get(struct drm_device *dev,
			  struct i915_error_state_file_priv *error_priv);
void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
void i915_destroy_error_state(struct drm_device *dev);

void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
const char *i915_cache_level_str(int type);
/* i915_suspend.c */
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);

/* i915_ums.c */
void i915_save_display_reg(struct drm_device *dev);
void i915_restore_display_reg(struct drm_device *dev);

/* i915_sysfs.c */
void i915_setup_sysfs(struct drm_device *dev);
void i915_teardown_sysfs(struct drm_device *dev);
/* intel_i2c.c */
extern int intel_setup_gmbus(struct drm_device *dev);
extern void intel_teardown_gmbus(struct drm_device *dev);
static inline bool intel_gmbus_is_port_valid(unsigned port)
{
	return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
}

extern struct i2c_adapter *intel_gmbus_get_adapter(
		struct drm_i915_private *dev_priv, unsigned port);
extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
{
	return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
}
extern void intel_i2c_reset(struct drm_device *dev);
/* intel_opregion.c */
extern int intel_opregion_setup(struct drm_device *dev);
#ifdef CONFIG_ACPI
extern void intel_opregion_init(struct drm_device *dev);
extern void intel_opregion_fini(struct drm_device *dev);
extern void intel_opregion_asle_intr(struct drm_device *dev);
#else
static inline void intel_opregion_init(struct drm_device *dev) { return; }
static inline void intel_opregion_fini(struct drm_device *dev) { return; }
static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
#endif

/* intel_acpi.c */
#ifdef CONFIG_ACPI
extern void intel_register_dsm_handler(void);
extern void intel_unregister_dsm_handler(void);
#else
static inline void intel_register_dsm_handler(void) { return; }
static inline void intel_unregister_dsm_handler(void) { return; }
#endif /* CONFIG_ACPI */
/* modesetting */
extern void intel_modeset_init_hw(struct drm_device *dev);
extern void intel_modeset_suspend_hw(struct drm_device *dev);
extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void intel_modeset_setup_hw_state(struct drm_device *dev,
					 bool force_restore);
extern void i915_redisable_vga(struct drm_device *dev);
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void valleyview_set_rps(struct drm_device *dev, u8 val);
extern int valleyview_rps_max_freq(struct drm_i915_private *dev_priv);
extern int valleyview_rps_min_freq(struct drm_i915_private *dev_priv);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
extern int intel_enable_rc6(const struct drm_device *dev);

extern bool i915_semaphore_is_enabled(struct drm_device *dev);
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);
/* overlay */
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
					    struct intel_overlay_error_state *error);

extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
					    struct drm_device *dev,
					    struct intel_display_error_state *error);
/* On SNB platform, before reading ring registers forcewake bit
 * must be set to prevent GT core from power down and stale values being
 * returned.
 */
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
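/*
 * Illustrative sketch (not part of the original header): per the comment
 * above, GT register reads on SNB are bracketed by a forcewake get/put
 * pair so the GT core stays powered for the duration of the access. The
 * register used here is only an example choice:
 *
 *	gen6_gt_force_wake_get(dev_priv);
 *	val = I915_READ(GEN6_RP_STATE_CAP);
 *	gen6_gt_force_wake_put(dev_priv);
 */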
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
/* intel_sideband.c */
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr);
void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val);
u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
u32 vlv_dpio_read(struct drm_i915_private *dev_priv, int reg);
void vlv_dpio_write(struct drm_i915_private *dev_priv, int reg, u32 val);
u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
		   enum intel_sbi_destination destination);
void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
		     enum intel_sbi_destination destination);

int vlv_gpu_freq(int ddr_freq, int val);
int vlv_freq_opcode(int ddr_freq, int val);
#define __i915_read(x) \
	u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace);
__i915_read(8)
__i915_read(16)
__i915_read(32)
__i915_read(64)
#undef __i915_read

#define __i915_write(x) \
	void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace);
__i915_write(8)
__i915_write(16)
__i915_write(32)
__i915_write(64)
#undef __i915_write
#define I915_READ8(reg)		i915_read8(dev_priv, (reg), true)
#define I915_WRITE8(reg, val)	i915_write8(dev_priv, (reg), (val), true)

#define I915_READ16(reg)	i915_read16(dev_priv, (reg), true)
#define I915_WRITE16(reg, val)	i915_write16(dev_priv, (reg), (val), true)
#define I915_READ16_NOTRACE(reg)	i915_read16(dev_priv, (reg), false)
#define I915_WRITE16_NOTRACE(reg, val)	i915_write16(dev_priv, (reg), (val), false)

#define I915_READ(reg)		i915_read32(dev_priv, (reg), true)
#define I915_WRITE(reg, val)	i915_write32(dev_priv, (reg), (val), true)
#define I915_READ_NOTRACE(reg)		i915_read32(dev_priv, (reg), false)
#define I915_WRITE_NOTRACE(reg, val)	i915_write32(dev_priv, (reg), (val), false)

#define I915_WRITE64(reg, val)	i915_write64(dev_priv, (reg), (val), true)
#define I915_READ64(reg)	i915_read64(dev_priv, (reg), true)

#define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)
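/*
 * Usage note (illustrative, not part of the original header): mmio writes
 * can be posted, so a read-back of the same register is the usual way to
 * force a write out to the hardware before continuing. The result is
 * discarded and the access is untraced:
 *
 *	I915_WRITE(reg, val);
 *	POSTING_READ(reg);	// flush the posted write
 */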
2293 /* "Broadcast RGB" property */
2294 #define INTEL_BROADCAST_RGB_AUTO 0
2295 #define INTEL_BROADCAST_RGB_FULL 1
2296 #define INTEL_BROADCAST_RGB_LIMITED 2
static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
{
	if (HAS_PCH_SPLIT(dev))
		return CPU_VGACNTRL;
	else if (IS_VALLEYVIEW(dev))
		return VLV_VGACNTRL;
	else
		return VGACNTRL;
}

static inline void __user *to_user_ptr(u64 address)
{
	return (void __user *)(uintptr_t)address;
}
static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
{
	unsigned long j = msecs_to_jiffies(m);

	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}

static inline unsigned long
timespec_to_jiffies_timeout(const struct timespec *value)
{
	unsigned long j = timespec_to_jiffies(value);

	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}
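/*
 * Worked example (illustrative): a wait of n jiffies only guarantees that
 * between n-1 and n full ticks elapse, because the first tick can fire
 * almost immediately after the timeout is armed. With an assumed HZ=100
 * (10ms ticks), a 15ms request maps to 2 jiffies, which may expire after
 * only ~10ms of real time; the "+ 1" above adds one extra tick so at least
 * the full requested interval passes, clamped to MAX_JIFFY_OFFSET.
 */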