/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <uapi/drm/i915_drm.h>

#include "intel_bios.h"
#include "intel_ringbuffer.h"
#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <drm/intel-gtt.h>
#include <linux/backlight.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
#include <linux/pm_qos.h>
/* General customization:
 */

#define DRIVER_AUTHOR "Tungsten Graphics, Inc."

#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
#define DRIVER_DATE "20080730"
#define pipe_name(p) ((p) + 'A')

#define transcoder_name(t) ((t) + 'A')

#define plane_name(p) ((p) + 'A')

#define sprite_name(p, s) ((p) * dev_priv->num_plane + (s) + 'A')

#define port_name(p) ((p) + 'A')
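/*
 * Illustrative note (not in the original header): these helpers turn a
 * zero-based hardware index into the letter used in debug output, so
 * pipe_name(0) == 'A' and port_name(2) == 'C'. sprite_name() flattens a
 * (pipe, sprite) pair: with num_plane == 2, sprite_name(1, 1) == 'D'.
 */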
enum intel_display_power_domain {
        POWER_DOMAIN_PIPE_A_PANEL_FITTER,
        POWER_DOMAIN_PIPE_B_PANEL_FITTER,
        POWER_DOMAIN_PIPE_C_PANEL_FITTER,
        POWER_DOMAIN_TRANSCODER_A,
        POWER_DOMAIN_TRANSCODER_B,
        POWER_DOMAIN_TRANSCODER_C,
        POWER_DOMAIN_TRANSCODER_EDP,
#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)

#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
                ((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
#define POWER_DOMAIN_TRANSCODER(tran) \
        ((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
         (tran) + POWER_DOMAIN_TRANSCODER_A)

#define HSW_ALWAYS_ON_POWER_DOMAINS (           \
        BIT(POWER_DOMAIN_PIPE_A) |              \
        BIT(POWER_DOMAIN_TRANSCODER_EDP))
#define BDW_ALWAYS_ON_POWER_DOMAINS (           \
        BIT(POWER_DOMAIN_PIPE_A) |              \
        BIT(POWER_DOMAIN_TRANSCODER_EDP) |      \
        BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))
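/*
 * Illustrative sketch (not part of the driver): a power-domain mask is
 * composed by OR-ing BIT() values, exactly like the always-on masks above.
 * Assuming the usual PIPE_B/TRANSCODER_B enumerators from the (elided)
 * pipe/transcoder enums:
 *
 *      unsigned long mask = BIT(POWER_DOMAIN_PIPE(PIPE_B)) |
 *                           BIT(POWER_DOMAIN_TRANSCODER(TRANSCODER_B));
 */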
enum hpd_pin {
        HPD_PORT_A = HPD_NONE, /* PORT_A is internal */
        HPD_TV = HPD_NONE,     /* TV is known to be unreliable */
#define I915_GEM_GPU_DOMAINS \
        (I915_GEM_DOMAIN_RENDER | \
         I915_GEM_DOMAIN_SAMPLER | \
         I915_GEM_DOMAIN_COMMAND | \
         I915_GEM_DOMAIN_INSTRUCTION | \
         I915_GEM_DOMAIN_VERTEX)
#define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++)
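/*
 * Illustrative sketch (not part of the driver): for_each_pipe() is a plain
 * counting loop bounded by the device's pipe count, so it needs a 'dev' in
 * scope:
 *
 *      int pipe;
 *      for_each_pipe(pipe)
 *              DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
 */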
#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
        list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
                if ((intel_encoder)->base.crtc == (__crtc))
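/*
 * Illustrative sketch (not part of the driver): the loop body only runs for
 * encoders whose base.crtc matches the given crtc:
 *
 *      struct intel_encoder *encoder;
 *      for_each_encoder_on_crtc(dev, crtc, encoder)
 *              DRM_DEBUG_KMS("encoder %d\n", encoder->base.base.id);
 */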
struct drm_i915_private;

enum intel_dpll_id {
        DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
        /* real shared dpll ids must be >= 0 */

#define I915_NUM_PLLS 2
struct intel_dpll_hw_state {
struct intel_shared_dpll {
        int refcount; /* count of number of CRTCs sharing this PLL */
        int active; /* count of number of active CRTCs (i.e. DPMS on) */
        bool on; /* is the PLL actually active? Disabled during modeset */
        /* should match the index in the dev_priv->shared_dplls array */
        enum intel_dpll_id id;
        struct intel_dpll_hw_state hw_state;
        void (*mode_set)(struct drm_i915_private *dev_priv,
                         struct intel_shared_dpll *pll);
        void (*enable)(struct drm_i915_private *dev_priv,
                       struct intel_shared_dpll *pll);
        void (*disable)(struct drm_i915_private *dev_priv,
                        struct intel_shared_dpll *pll);
        bool (*get_hw_state)(struct drm_i915_private *dev_priv,
                             struct intel_shared_dpll *pll,
                             struct intel_dpll_hw_state *hw_state);
};
/* Used by dp and fdi links */
struct intel_link_m_n {

void intel_link_compute_m_n(int bpp, int nlanes,
                            int pixel_clock, int link_clock,
                            struct intel_link_m_n *m_n);
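/*
 * Illustrative sketch (not part of the driver): M/N values encode the ratio
 * between pixel clock and link clock. For a hypothetical 24bpp 148500 kHz
 * mode driven over 4 lanes of a 270000 kHz DP link:
 *
 *      struct intel_link_m_n m_n;
 *      intel_link_compute_m_n(24, 4, 148500, 270000, &m_n);
 */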
struct intel_ddi_plls {
/* Interface history:
 *
 * 1.2: Add Power Management
 * 1.3: Add vblank support
 * 1.4: Fix cmdbuffer path, add heap destroy
 * 1.5: Add vblank pipe configuration
 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
 *      - Support vertical blank on secondary display pipe
 */
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 6
#define DRIVER_PATCHLEVEL 0

#define WATCH_LISTS 0

#define I915_GEM_PHYS_CURSOR_0 1
#define I915_GEM_PHYS_CURSOR_1 2
#define I915_GEM_PHYS_OVERLAY_REGS 3
#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)
struct drm_i915_gem_phys_object {
        struct page **page_list;
        drm_dma_handle_t *handle;
        struct drm_i915_gem_object *cur_obj;
};
struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
struct opregion_asle;
struct intel_opregion {
        struct opregion_header __iomem *header;
        struct opregion_acpi __iomem *acpi;
        struct opregion_swsci __iomem *swsci;
        u32 swsci_gbda_sub_functions;
        u32 swsci_sbcb_sub_functions;
        struct opregion_asle __iomem *asle;
        u32 __iomem *lid_state;
        struct work_struct asle_work;
};
#define OPREGION_SIZE (8*1024)
struct intel_overlay;
struct intel_overlay_error_state;
struct drm_i915_master_private {
        drm_local_map_t *sarea;
        struct _drm_i915_sarea *sarea_priv;
};
#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 6
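/*
 * Illustrative note (not in the original header): the stored values are the
 * register numbers 0..31 plus the -1 sentinel, so a signed field is needed.
 * Six bits give a range of -32..31, which just covers this; five bits would
 * only reach -16..15.
 */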
struct drm_i915_fence_reg {
        struct list_head lru_list;
        struct drm_i915_gem_object *obj;
};
struct sdvo_device_mapping {

struct intel_display_error_state;
struct drm_i915_error_state {
        bool waiting[I915_NUM_RINGS];
        u32 pipestat[I915_MAX_PIPES];
        u32 tail[I915_NUM_RINGS];
        u32 head[I915_NUM_RINGS];
        u32 ctl[I915_NUM_RINGS];
        u32 ipeir[I915_NUM_RINGS];
        u32 ipehr[I915_NUM_RINGS];
        u32 instdone[I915_NUM_RINGS];
        u32 acthd[I915_NUM_RINGS];
        u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
        u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1];
        u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */
        /* our own tracking of ring head and tail */
        u32 cpu_ring_head[I915_NUM_RINGS];
        u32 cpu_ring_tail[I915_NUM_RINGS];
        u32 error; /* gen6+ */
        u32 err_int; /* gen7 */
        u32 bbstate[I915_NUM_RINGS];
        u32 instpm[I915_NUM_RINGS];
        u32 instps[I915_NUM_RINGS];
        u32 extra_instdone[I915_NUM_INSTDONE_REG];
        u32 seqno[I915_NUM_RINGS];
        u32 fault_reg[I915_NUM_RINGS];
        u32 faddr[I915_NUM_RINGS];
        u64 fence[I915_MAX_NUM_FENCES];
        struct drm_i915_error_ring {
                struct drm_i915_error_object {
                } *ringbuffer, *batchbuffer, *ctx;
                struct drm_i915_error_request {
                } *requests;
        } ring[I915_NUM_RINGS];
        struct drm_i915_error_buffer {
                s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
        } **active_bo, **pinned_bo;
        u32 *active_bo_count, *pinned_bo_count;
        struct intel_overlay_error_state *overlay;
        struct intel_display_error_state *display;
        int hangcheck_score[I915_NUM_RINGS];
        enum intel_ring_hangcheck_action hangcheck_action[I915_NUM_RINGS];
};
struct intel_crtc_config;
struct drm_i915_display_funcs {
        bool (*fbc_enabled)(struct drm_device *dev);
        void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
        void (*disable_fbc)(struct drm_device *dev);
        int (*get_display_clock_speed)(struct drm_device *dev);
        int (*get_fifo_size)(struct drm_device *dev, int plane);
        /**
         * find_dpll() - Find the best values for the PLL
         * @limit: limits for the PLL
         * @crtc: current CRTC
         * @target: target frequency in kHz
         * @refclk: reference clock frequency in kHz
         * @match_clock: if provided, @best_clock's P divider must
         *               match the P divider from @match_clock
         *               used for LVDS downclocking
         * @best_clock: best PLL values found
         *
         * Returns true on success, false on failure.
         */
        bool (*find_dpll)(const struct intel_limit *limit,
                          struct drm_crtc *crtc,
                          int target, int refclk,
                          struct dpll *match_clock,
                          struct dpll *best_clock);
        void (*update_wm)(struct drm_crtc *crtc);
        void (*update_sprite_wm)(struct drm_plane *plane,
                                 struct drm_crtc *crtc,
                                 uint32_t sprite_width, int pixel_size,
                                 bool enable, bool scaled);
        void (*modeset_global_resources)(struct drm_device *dev);
        /* Returns the active state of the crtc, and if the crtc is active,
         * fills out the pipe-config with the hw state. */
        bool (*get_pipe_config)(struct intel_crtc *,
                                struct intel_crtc_config *);
        int (*crtc_mode_set)(struct drm_crtc *crtc,
                             int x, int y,
                             struct drm_framebuffer *old_fb);
        void (*crtc_enable)(struct drm_crtc *crtc);
        void (*crtc_disable)(struct drm_crtc *crtc);
        void (*off)(struct drm_crtc *crtc);
        void (*write_eld)(struct drm_connector *connector,
                          struct drm_crtc *crtc,
                          struct drm_display_mode *mode);
        void (*fdi_link_train)(struct drm_crtc *crtc);
        void (*init_clock_gating)(struct drm_device *dev);
        int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
                          struct drm_framebuffer *fb,
                          struct drm_i915_gem_object *obj,
                          uint32_t flags);
        int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
                            int x, int y);
        void (*hpd_irq_setup)(struct drm_device *dev);
        /* clock updates for mode set */
        /* render clock increase/decrease */
        /* display clock increase/decrease */
        /* pll clock increase/decrease */
};
struct intel_uncore_funcs {
        void (*force_wake_get)(struct drm_i915_private *dev_priv);
        void (*force_wake_put)(struct drm_i915_private *dev_priv);

        uint8_t  (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
        uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
        uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
        uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, off_t offset, bool trace);

        void (*mmio_writeb)(struct drm_i915_private *dev_priv, off_t offset,
                            uint8_t val, bool trace);
        void (*mmio_writew)(struct drm_i915_private *dev_priv, off_t offset,
                            uint16_t val, bool trace);
        void (*mmio_writel)(struct drm_i915_private *dev_priv, off_t offset,
                            uint32_t val, bool trace);
        void (*mmio_writeq)(struct drm_i915_private *dev_priv, off_t offset,
                            uint64_t val, bool trace);
};
struct intel_uncore {
        spinlock_t lock; /** lock is also taken in irq contexts. */

        struct intel_uncore_funcs funcs;

        unsigned forcewake_count;

        struct delayed_work force_wake_work;
};
#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
        func(is_mobile) sep \
        func(is_i945gm) sep \
        func(need_gfx_hws) sep \
        func(is_pineview) sep \
        func(is_broadwater) sep \
        func(is_crestline) sep \
        func(is_ivybridge) sep \
        func(is_valleyview) sep \
        func(is_haswell) sep \
        func(is_preliminary) sep \
        func(has_pipe_cxsr) sep \
        func(has_hotplug) sep \
        func(cursor_needs_physical) sep \
        func(has_overlay) sep \
        func(overlay_needs_physical) sep \
        func(supports_tv) sep \
        func(has_llc) sep \
        func(has_ddi) sep \
        func(has_fpga_dbg)

#define DEFINE_FLAG(name) u8 name:1
#define SEP_SEMICOLON ;
struct intel_device_info {
        u32 display_mmio_offset;
        u8 ring_mask; /* Rings supported by the HW */
        DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
};
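/*
 * Illustrative sketch (not part of the driver): with DEFINE_FLAG and
 * SEP_SEMICOLON plugged into the X-macro above, the flag list expands to a
 * run of single-bit bitfields, roughly:
 *
 *      u8 is_mobile:1;
 *      u8 is_i945gm:1;
 *      ...
 */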
enum i915_cache_level {
        I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
        I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specific
                              caches, eg sampler/render caches, and the
                              large Last-Level-Cache. LLC is coherent with
                              the CPU, but L3 is only visible to the GPU. */
        I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
};
typedef uint32_t gen6_gtt_pte_t;
struct i915_address_space {
        struct drm_device *dev;
        struct list_head global_link;
        unsigned long start; /* Start offset always 0 for dri2 */
        size_t total; /* size addr space maps (ex. 2GB for ggtt) */

        /**
         * List of objects currently involved in rendering.
         *
         * Includes buffers having the contents of their GPU caches
         * flushed, not necessarily primitives. last_rendering_seqno
         * represents when the rendering involved will be completed.
         *
         * A reference is held on the buffer while on this list.
         */
        struct list_head active_list;

        /**
         * LRU list of objects which are not in the ringbuffer and
         * are ready to unbind, but are still in the GTT.
         *
         * last_rendering_seqno is 0 while an object is in this list.
         *
         * A reference is not held on the buffer while on this list,
         * as merely being GTT-bound shouldn't prevent its being
         * freed, and we'll pull it off the list in the free path.
         */
        struct list_head inactive_list;

        /* FIXME: Need a more generic return type */
        gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
                                     enum i915_cache_level level,
                                     bool valid); /* Create a valid PTE */
        void (*clear_range)(struct i915_address_space *vm,
                            unsigned int first_entry,
                            unsigned int num_entries,
                            bool use_scratch);
        void (*insert_entries)(struct i915_address_space *vm,
                               struct sg_table *st,
                               unsigned int first_entry,
                               enum i915_cache_level cache_level);
        void (*cleanup)(struct i915_address_space *vm);
};
/* The Graphics Translation Table is the way in which GEN hardware translates a
 * Graphics Virtual Address into a Physical Address. In addition to the normal
 * collateral associated with any va->pa translations GEN hardware also has a
 * portion of the GTT which can be mapped by the CPU and remain both coherent
 * and correct (in cases like swizzling). That region is referred to as GMADR in
 * the spec.
 */
struct i915_gtt {
        struct i915_address_space base;
        size_t stolen_size; /* Total size of stolen memory */

        unsigned long mappable_end; /* End offset that we can CPU map */
        struct io_mapping *mappable; /* Mapping to our CPU mappable region */
        phys_addr_t mappable_base; /* PA of our GMADR */

        /** "Graphics Stolen Memory" holds the global PTEs */

        int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
                         size_t *stolen, phys_addr_t *mappable_base,
                         unsigned long *mappable_end);
};
#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)
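/*
 * Illustrative note (not in the original header): with 4KiB pages
 * (PAGE_SHIFT == 12), a 2GB global GTT yields
 * gtt_total_entries(gtt) == (2UL << 30) >> 12 == 524288 PTEs.
 */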
struct i915_hw_ppgtt {
        struct i915_address_space base;
        unsigned num_pd_entries;
        struct page **pt_pages;
        struct page *gen8_pt_pages;
        struct page *pd_pages;
        dma_addr_t pd_dma_addr[4];
        dma_addr_t *pt_dma_addr;
        dma_addr_t *gen8_pt_dma_addr[4];

        int (*enable)(struct drm_device *dev);
};
/**
 * A VMA represents a GEM BO that is bound into an address space. Therefore, a
 * VMA's presence cannot be guaranteed before binding, or after unbinding the
 * object into/from the address space.
 *
 * To make things as simple as possible (ie. no refcounting), a VMA's lifetime
 * will always be <= an object's lifetime. So object refcounting should cover us.
 */
struct i915_vma {
        struct drm_mm_node node;
        struct drm_i915_gem_object *obj;
        struct i915_address_space *vm;

        /** This object's place on the active/inactive lists */
        struct list_head mm_list;

        struct list_head vma_link; /* Link in the object's VMA list */

        /** This vma's place in the batchbuffer or on the eviction list */
        struct list_head exec_list;

        /**
         * Used for performing relocations during execbuffer insertion.
         */
        struct hlist_node exec_node;
        unsigned long exec_handle;
        struct drm_i915_gem_exec_object2 *exec_entry;
};
struct i915_ctx_hang_stats {
        /* This context had batch pending when hang was declared */
        unsigned batch_pending;

        /* This context had batch active when hang was declared */
        unsigned batch_active;

        /* Time when this context was last blamed for a GPU reset */
        unsigned long guilty_ts;

        /* This context is banned from submitting more work */
};
/* This must match up with the value previously used for execbuf2.rsvd1. */
#define DEFAULT_CONTEXT_ID 0
struct i915_hw_context {
        struct drm_i915_file_private *file_priv;
        struct intel_ring_buffer *ring;
        struct drm_i915_gem_object *obj;
        struct i915_ctx_hang_stats hang_stats;

        struct list_head link;
};
        struct drm_mm_node *compressed_fb;
        struct drm_mm_node *compressed_llb;

        struct intel_fbc_work {
                struct delayed_work work;
                struct drm_crtc *crtc;
                struct drm_framebuffer *fb;
        } *fbc_work;
        enum no_fbc_reason {
                FBC_OK, /* FBC is enabled */
                FBC_UNSUPPORTED, /* FBC is not supported by this chipset */
                FBC_NO_OUTPUT, /* no outputs enabled to compress */
                FBC_STOLEN_TOO_SMALL, /* not enough space for buffers */
                FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
                FBC_MODE_TOO_LARGE, /* mode too large for compression */
                FBC_BAD_PLANE, /* fbc not supported on plane */
                FBC_NOT_TILED, /* buffer not tiled */
                FBC_MULTIPLE_PIPES, /* more than one pipe active */
                FBC_CHIP_DEFAULT, /* disabled by default on this chip */
        } no_fbc_reason;
};
enum intel_pch {
        PCH_NONE = 0,   /* No PCH present */
        PCH_IBX,        /* Ibexpeak PCH */
        PCH_CPT,        /* Cougarpoint PCH */
        PCH_LPT,        /* Lynxpoint PCH */
};

enum intel_sbi_destination {
#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)
#define QUIRK_NO_PCH_PWM_ENABLE (1<<3)

struct intel_fbc_work;
struct intel_gmbus {
        struct i2c_adapter adapter;
        struct i2c_algo_bit_data bit_algo;
        struct drm_i915_private *dev_priv;
};
struct i915_suspend_saved_registers {
        u32 saveTRANS_HTOTAL_A;
        u32 saveTRANS_HBLANK_A;
        u32 saveTRANS_HSYNC_A;
        u32 saveTRANS_VTOTAL_A;
        u32 saveTRANS_VBLANK_A;
        u32 saveTRANS_VSYNC_A;
        u32 savePFIT_PGM_RATIOS;
        u32 saveBLC_HIST_CTL;
        u32 saveBLC_PWM_CTL2;
        u32 saveBLC_HIST_CTL_B;
        u32 saveBLC_PWM_CTL_B;
        u32 saveBLC_PWM_CTL2_B;
        u32 saveBLC_CPU_PWM_CTL;
        u32 saveBLC_CPU_PWM_CTL2;
        u32 saveTRANS_HTOTAL_B;
        u32 saveTRANS_HBLANK_B;
        u32 saveTRANS_HSYNC_B;
        u32 saveTRANS_VTOTAL_B;
        u32 saveTRANS_VBLANK_B;
        u32 saveTRANS_VSYNC_B;
        u32 savePP_ON_DELAYS;
        u32 savePP_OFF_DELAYS;
        u32 savePFIT_CONTROL;
        u32 save_palette_a[256];
        u32 save_palette_b[256];
        u32 saveDPFC_CB_BASE;
        u32 saveFBC_CFB_BASE;
        u32 saveFBC_CONTROL2;
        u32 saveCACHE_MODE_0;
        u32 saveMI_ARB_STATE;
        uint64_t saveFENCE[I915_MAX_NUM_FENCES];
        u32 savePIPEA_GMCH_DATA_M;
        u32 savePIPEB_GMCH_DATA_M;
        u32 savePIPEA_GMCH_DATA_N;
        u32 savePIPEB_GMCH_DATA_N;
        u32 savePIPEA_DP_LINK_M;
        u32 savePIPEB_DP_LINK_M;
        u32 savePIPEA_DP_LINK_N;
        u32 savePIPEB_DP_LINK_N;
        u32 savePCH_DREF_CONTROL;
        u32 saveDISP_ARB_CTL;
        u32 savePIPEA_DATA_M1;
        u32 savePIPEA_DATA_N1;
        u32 savePIPEA_LINK_M1;
        u32 savePIPEA_LINK_N1;
        u32 savePIPEB_DATA_M1;
        u32 savePIPEB_DATA_N1;
        u32 savePIPEB_LINK_M1;
        u32 savePIPEB_LINK_N1;
        u32 saveMCHBAR_RENDER_STANDBY;
        u32 savePCH_PORT_HOTPLUG;
};
struct intel_gen6_power_mgmt {
        /* work and pm_iir are protected by dev_priv->irq_lock */
        struct work_struct work;

        /* The below variables and all the rps hw state are protected by
         * dev->struct_mutex. */

        enum { LOW_POWER, BETWEEN, HIGH_POWER } power;

        struct delayed_work delayed_resume_work;

        /*
         * Protects RPS/RC6 register access and PCU communication.
         * Must be taken after struct_mutex if nested.
         */
        struct mutex hw_lock;
};

/* defined in intel_pm.c */
extern spinlock_t mchdev_lock;
struct intel_ilk_power_mgmt {
        unsigned long last_time1;
        unsigned long chipset_power;
        struct timespec last_time2;
        unsigned long gfx_power;

        struct drm_i915_gem_object *pwrctx;
        struct drm_i915_gem_object *renderctx;
};
/* Power well structure for haswell */
struct i915_power_well {
        /* power well enable/disable usage count */
};

#define I915_MAX_POWER_WELLS 1

struct i915_power_domains {
        /*
         * Power wells needed for initialization at driver init and suspend
         * time are on. They are kept on until after the first modeset.
         */
        struct i915_power_well power_wells[I915_MAX_POWER_WELLS];
};
struct i915_dri1_state {
        unsigned allow_batchbuffer : 1;
        u32 __iomem *gfx_hws_cpu_addr;
};
struct i915_ums_state {
        /**
         * Flag if the X Server, and thus DRM, is not currently in
         * control of the device.
         *
         * This is set between LeaveVT and EnterVT. It needs to be
         * replaced with a semaphore. It also needs to be
         * transitioned away from for kernel modesetting.
         */
};
#define MAX_L3_SLICES 2
struct intel_l3_parity {
        u32 *remap_info[MAX_L3_SLICES];
        struct work_struct error_work;
};
struct i915_gem_mm {
        /** Memory allocator for GTT stolen memory */
        struct drm_mm stolen;
        /** List of all objects in gtt_space. Used to restore gtt
         *  mappings on resume */
        struct list_head bound_list;
        /**
         * List of objects which are not bound to the GTT (thus
         * are idle and not used by the GPU) but still have
         * (presumably uncached) pages still attached.
         */
        struct list_head unbound_list;

        /** Usable portion of the GTT for GEM */
        unsigned long stolen_base; /* limited to low memory (32-bit) */

        /** PPGTT used for aliasing the PPGTT with the GTT */
        struct i915_hw_ppgtt *aliasing_ppgtt;

        struct shrinker inactive_shrinker;
        bool shrinker_no_lock_stealing;

        /** LRU list of objects with fence regs on them. */
        struct list_head fence_list;

        /**
         * We leave the user IRQ off as much as possible,
         * but this means that requests will finish and never
         * be retired once the system goes idle. Set a timer to
         * fire periodically while the ring is running. When it
         * fires, go retire requests.
         */
        struct delayed_work retire_work;

        /**
         * When we detect an idle GPU, we want to turn on
         * powersaving features. So once we see that there
         * are no more requests outstanding and no more
         * arrive within a small period of time, we fire
         * off the idle_work.
         */
        struct delayed_work idle_work;

        /**
         * Are we in a non-interruptible section of code like
         * modeset?
         */

        /** Bit 6 swizzling required for X tiling */
        uint32_t bit_6_swizzle_x;
        /** Bit 6 swizzling required for Y tiling */
        uint32_t bit_6_swizzle_y;

        /* storage for physical objects */
        struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];

        /* accounting, useful for userland debugging */
        spinlock_t object_stat_lock;
        size_t object_memory;
};
struct drm_i915_error_state_buf {
};

struct i915_error_state_file_priv {
        struct drm_device *dev;
        struct drm_i915_error_state *error;
};
struct i915_gpu_error {
        /* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
        /* Hang gpu twice in this window and your context gets banned */
#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)

        struct timer_list hangcheck_timer;

        /* For reset and error_state handling. */
        /* Protected by the above dev->gpu_error.lock. */
        struct drm_i915_error_state *first_error;
        struct work_struct work;

        unsigned long missed_irq_rings;

        /**
         * State variable and reset counter controlling the reset flow
         *
         * Upper bits are for the reset counter. This counter is used by the
         * wait_seqno code to notice, without holding any locks, that a reset
         * event happened and that it needs to restart the entire ioctl (since
         * most likely the seqno it waited for won't ever signal anytime soon).
         *
         * This is important for lock-free wait paths, where no contended lock
         * naturally enforces the correct ordering between the bail-out of the
         * waiter and the gpu reset work code.
         *
         * Lowest bit controls the reset state machine: Set means a reset is in
         * progress. This state will (presuming we don't have any bugs) decay
         * into either unset (successful reset) or the special WEDGED value (hw
         * terminally sour). All waiters on the reset_queue will be woken when
         * that happens.
         */
        atomic_t reset_counter;

        /**
         * Special values/flags for reset_counter
         *
         * Note that the code relies on
         *      I915_WEDGED & I915_RESET_IN_PROGRESS_FLAG
         * being true.
         */
#define I915_RESET_IN_PROGRESS_FLAG 1
#define I915_WEDGED 0xffffffff

        /**
         * Waitqueue to signal when the reset has completed. Used by clients
         * that wait for dev_priv->mm.wedged to settle.
         */
        wait_queue_head_t reset_queue;

        /* For gpu hang simulation. */
        unsigned int stop_rings;

        /* For missed irq/seqno simulation. */
        unsigned int test_irq_rings;
};
enum modeset_restore {
        MODESET_ON_LID_OPEN,
};
struct ddi_vbt_port_info {
        uint8_t hdmi_level_shift;

        uint8_t supports_dvi:1;
        uint8_t supports_hdmi:1;
        uint8_t supports_dp:1;
};
struct intel_vbt_data {
        struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
        struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */

        unsigned int int_tv_support:1;
        unsigned int lvds_dither:1;
        unsigned int lvds_vbt:1;
        unsigned int int_crt_support:1;
        unsigned int lvds_use_ssc:1;
        unsigned int display_clock_mode:1;
        unsigned int fdi_rx_polarity_inverted:1;

        unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */

        int edp_preemphasis;
        bool edp_initialized;
        struct edp_power_seq edp_pps;

        union child_device_config *child_dev;

        struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
};
enum intel_ddb_partitioning {
        INTEL_DDB_PART_5_6, /* IVB+ */

struct intel_wm_level {

struct hsw_wm_values {
        uint32_t wm_pipe[3];
        uint32_t wm_lp_spr[3];
        uint32_t wm_linetime[3];
        enum intel_ddb_partitioning partitioning;
};
/*
 * This struct tracks the state needed for the Package C8+ feature.
 *
 * Package states C8 and deeper are really deep PC states that can only be
 * reached when all the devices on the system allow it, so even if the graphics
 * device allows PC8+, it doesn't mean the system will actually get to these
 * states.
 *
 * Our driver only allows PC8+ when all the outputs are disabled, the power well
 * is disabled and the GPU is idle. When these conditions are met, we manually
 * do the other conditions: disable the interrupts, clocks and switch LCPLL
 * refclk to Fclk.
 *
 * When we really reach PC8 or deeper states (not just when we allow it) we lose
 * the state of some registers, so when we come back from PC8+ we need to
 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
 * need to take care of the registers kept by RC6.
 *
 * The interrupt disabling is part of the requirements. We can only leave the
 * PCH HPD interrupts enabled. If we're in PC8+ and we get another interrupt we
 * can lock the machine.
 *
 * Ideally every piece of our code that needs PC8+ disabled would call
 * hsw_disable_package_c8, which would increment disable_count and prevent the
 * system from reaching PC8+. But we don't have a symmetric way to do this for
 * everything, so we have the requirements_met and gpu_idle variables. When we
 * switch requirements_met or gpu_idle to true we decrease disable_count, and
 * increase it in the opposite case. The requirements_met variable is true when
 * all the CRTCs, encoders and the power well are disabled. The gpu_idle
 * variable is true when the GPU is idle.
 *
 * In addition to everything, we only actually enable PC8+ if disable_count
 * stays at zero for at least some seconds. This is implemented with the
 * enable_work variable. We do this so we don't enable/disable PC8 dozens of
 * consecutive times when all screens are disabled and some background app
 * queries the state of our connectors, or we have some application constantly
 * waking up to use the GPU. Only after the enable_work function actually
 * enables PC8+ will the "enable" variable become true, which means that it can
 * be false even if disable_count is 0.
 *
 * The irqs_disabled variable becomes true exactly after we disable the IRQs and
 * goes back to false exactly before we reenable the IRQs. We use this variable
 * to check if someone is trying to enable/disable IRQs while they're supposed
 * to be disabled. This shouldn't happen and we'll print some error messages in
 * case it happens, but if it actually happens we'll also update the variables
 * inside struct regsave so when we restore the IRQs they will contain the
 * latest expected values.
 *
 * For more, read "Display Sequences for Package C8" in our documentation.
 */
struct i915_package_c8 {
        bool requirements_met;

        /* Only true after the delayed work task actually enables it. */

        struct delayed_work enable_work;

        uint32_t gen6_pmimr;
};
enum intel_pipe_crc_source {
        INTEL_PIPE_CRC_SOURCE_NONE,
        INTEL_PIPE_CRC_SOURCE_PLANE1,
        INTEL_PIPE_CRC_SOURCE_PLANE2,
        INTEL_PIPE_CRC_SOURCE_PF,
        INTEL_PIPE_CRC_SOURCE_PIPE,
        /* TV/DP on pre-gen5/vlv can't use the pipe source. */
        INTEL_PIPE_CRC_SOURCE_TV,
        INTEL_PIPE_CRC_SOURCE_DP_B,
        INTEL_PIPE_CRC_SOURCE_DP_C,
        INTEL_PIPE_CRC_SOURCE_DP_D,
        INTEL_PIPE_CRC_SOURCE_AUTO,
        INTEL_PIPE_CRC_SOURCE_MAX,
};
struct intel_pipe_crc_entry {
};

#define INTEL_PIPE_CRC_ENTRIES_NR 128
struct intel_pipe_crc {
        bool opened; /* exclusive access to the result file */
        struct intel_pipe_crc_entry *entries;
        enum intel_pipe_crc_source source;
        wait_queue_head_t wq;
};
typedef struct drm_i915_private {
        struct drm_device *dev;
        struct kmem_cache *slab;

        const struct intel_device_info *info;

        int relative_constants_mode;

        struct intel_uncore uncore;

        struct intel_gmbus gmbus[GMBUS_NUM_PORTS];

        /** gmbus_mutex protects against concurrent usage of the single hw gmbus
         * controller on different i2c buses. */
        struct mutex gmbus_mutex;

        /**
         * Base address of the gmbus and gpio block.
         */
        uint32_t gpio_mmio_base;

        wait_queue_head_t gmbus_wait_queue;

        struct pci_dev *bridge_dev;
        struct intel_ring_buffer ring[I915_NUM_RINGS];
        uint32_t last_seqno, next_seqno;

        drm_dma_handle_t *status_page_dmah;
        struct resource mch_res;

        atomic_t irq_received;

        /* protects the irq masks */
        spinlock_t irq_lock;

        /* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
        struct pm_qos_request pm_qos;

        /* DPIO indirect register protection */
        struct mutex dpio_lock;

        /** Cached value of IMR to avoid reads in updating the bitfield */
        u32 de_irq_mask[I915_MAX_PIPES];

        struct work_struct hotplug_work;
        bool enable_hotplug_processing;
        struct {
                unsigned long hpd_last_jiffies;
                enum {
                        HPD_MARK_DISABLED = 2
                } hpd_mark;
        } hpd_stats[HPD_NUM_PINS];
        struct timer_list hotplug_reenable_timer;

        struct i915_fbc fbc;
        struct intel_opregion opregion;
        struct intel_vbt_data vbt;

        struct intel_overlay *overlay;
        unsigned int sprite_scaling_enabled;

        struct {
                spinlock_t lock; /* bl registers and the above bl fields */
                struct backlight_device *device;
        } backlight;

        bool no_aux_handshake;

        struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
        int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
        int num_fence_regs; /* 8 on pre-965, 16 otherwise */

        unsigned int fsb_freq, mem_freq, is_ddr3;

        /**
         * wq - Driver workqueue for GEM.
         *
         * NOTE: Work items scheduled here are not allowed to grab any modeset
         * locks, for otherwise the flushing done in the pageflip code will
         * result in deadlocks.
         */
        struct workqueue_struct *wq;

        /* Display functions */
        struct drm_i915_display_funcs display;

        /* PCH chipset type */
        enum intel_pch pch_type;
        unsigned short pch_id;

        unsigned long quirks;

        enum modeset_restore modeset_restore;
        struct mutex modeset_restore_lock;

        struct list_head vm_list; /* Global list of all address spaces */
        struct i915_gtt gtt; /* VMA representing the global address space */

        struct i915_gem_mm mm;

        /* Kernel Modesetting */

        struct sdvo_device_mapping sdvo_mappings[2];

        struct drm_crtc *plane_to_crtc_mapping[3];
        struct drm_crtc *pipe_to_crtc_mapping[3];
        wait_queue_head_t pending_flip_queue;

#ifdef CONFIG_DEBUG_FS
        struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
#endif

        int num_shared_dpll;
        struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
        struct intel_ddi_plls ddi_plls;

        /* Reclocking support */
        bool render_reclock_avail;
        bool lvds_downclock_avail;
        /* indicates the reduced downclock for LVDS */

        bool mchbar_need_disable;

        struct intel_l3_parity l3_parity;

        /* Cannot be determined by PCIID. You must always read a register. */

        /* gen6+ rps state */
        struct intel_gen6_power_mgmt rps;

        /* ilk-only ips/rps state. Everything in here is protected by the global
         * mchdev_lock in intel_pm.c */
        struct intel_ilk_power_mgmt ips;

        struct i915_power_domains power_domains;

        struct i915_psr psr;

        struct i915_gpu_error gpu_error;

        struct drm_i915_gem_object *vlv_pctx;

#ifdef CONFIG_DRM_I915_FBDEV
        /* list of fbdev registered on this device */
        struct intel_fbdev *fbdev;
#endif

        /*
         * The console may be contended at resume, but we don't
         * want it to block on it.
         */
        struct work_struct console_resume_work;

        struct drm_property *broadcast_rgb_property;
        struct drm_property *force_audio_property;

        bool hw_contexts_disabled;
        uint32_t hw_context_size;
        struct list_head context_list;

        struct i915_suspend_saved_registers regfile;

        /*
         * Raw watermark latency values:
         * in 0.1us units for WM0,
         * in 0.5us units for WM1+.
         */
        uint16_t pri_latency[5];
        uint16_t spr_latency[5];
        uint16_t cur_latency[5];

        /* current hardware state */
        struct hsw_wm_values hw;

        struct i915_package_c8 pc8;

        /* Old dri1 support infrastructure, beware the dragons ya fools entering
         * here! */
        struct i915_dri1_state dri1;
        /* Old ums support infrastructure, same warning applies. */
        struct i915_ums_state ums;
} drm_i915_private_t;
static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
{
        return dev->dev_private;
}
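/*
 * Illustrative sketch (not part of the driver): to_i915() is the typed
 * shorthand for the dev_private pointer:
 *
 *      struct drm_i915_private *dev_priv = to_i915(dev);
 */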
/* Iterate over initialised rings */
#define for_each_ring(ring__, dev_priv__, i__) \
        for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
                if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))
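/*
 * Illustrative sketch (not part of the driver): the iterator only yields
 * rings that were actually initialised, so parts without e.g. a VEBOX are
 * skipped transparently:
 *
 *      struct intel_ring_buffer *ring;
 *      int i;
 *      for_each_ring(ring, dev_priv, i)
 *              DRM_DEBUG("ring %d initialised\n", i);
 */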
enum hdmi_force_audio {
        HDMI_AUDIO_OFF_DVI = -2,        /* no aux data for HDMI-DVI converter */
        HDMI_AUDIO_OFF,                 /* force turn off HDMI audio */
        HDMI_AUDIO_AUTO,                /* trust EDID */
        HDMI_AUDIO_ON,                  /* force turn on HDMI audio */
};

#define I915_GTT_OFFSET_NONE ((u32)-1)
struct drm_i915_gem_object_ops {
        /* Interface between the GEM object and its backing storage.
         * get_pages() is called once prior to the use of the associated set
         * of pages before binding them into the GTT, and put_pages() is
         * called after we no longer need them. As we expect there to be
         * associated cost with migrating pages between the backing storage
         * and making them available for the GPU (e.g. clflush), we may hold
         * onto the pages after they are no longer referenced by the GPU
         * in case they may be used again shortly (for example migrating the
         * pages to a different memory domain within the GTT). put_pages()
         * will therefore most likely be called when the object itself is
         * being released or under memory pressure (where we attempt to
         * reap pages for the shrinker).
         */
        int (*get_pages)(struct drm_i915_gem_object *);
        void (*put_pages)(struct drm_i915_gem_object *);
};
struct drm_i915_gem_object {
        struct drm_gem_object base;

        const struct drm_i915_gem_object_ops *ops;

        /** List of VMAs backed by this object */
        struct list_head vma_list;

        /** Stolen memory for this object, instead of being backed by shmem. */
        struct drm_mm_node *stolen;
        struct list_head global_list;

        struct list_head ring_list;
        /** Used in execbuf to temporarily hold a ref */
        struct list_head obj_exec_link;

        /**
         * This is set if the object is on the active lists (has pending
         * rendering and so a non-zero seqno), and is not set if it is on
         * inactive (ready to be unbound) list.
         */
        unsigned int active:1;

        /**
         * This is set if the object has been written to since last bound
         * to the GTT
         */
        unsigned int dirty:1;

        /**
         * Fence register bits (if any) for this object. Will be set
         * as needed when mapped into the GTT.
         * Protected by dev->struct_mutex.
         */
        signed int fence_reg:I915_MAX_NUM_FENCE_BITS;

        /**
         * Advice: are the backing pages purgeable?
         */
        unsigned int madv:2;

        /**
         * Current tiling mode for the object.
         */
        unsigned int tiling_mode:2;
        /**
         * Whether the tiling parameters for the currently associated fence
         * register have changed. Note that for the purposes of tracking
         * tiling changes we also treat the unfenced register, the register
         * slot that the object occupies whilst it executes a fenced
         * command (such as BLT on gen2/3), as a "fence".
         */
        unsigned int fence_dirty:1;

        /** How many users have pinned this object in GTT space. The following
         * users can each hold at most one reference: pwrite/pread, pin_ioctl
         * (via user_pin_count), execbuffer (objects are not allowed multiple
         * times for the same batchbuffer), and the framebuffer code. When
         * switching/pageflipping, the framebuffer code has at most two buffers
         * pinned per crtc.
         *
         * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
         * bits with absolutely no headroom. So use 4 bits. */
        unsigned int pin_count:4;
#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf

        /**
         * Is the object at the current location in the gtt mappable and
         * fenceable? Used to avoid costly recalculations.
         */
        unsigned int map_and_fenceable:1;

        /**
         * Whether the current gtt mapping needs to be mappable (and isn't just
         * mappable by accident). Track pin and fault separate for a more
         * accurate mappable working set.
         */
        unsigned int fault_mappable:1;
        unsigned int pin_mappable:1;
        unsigned int pin_display:1;

        /*
         * Is the GPU currently using a fence to access this buffer,
         */
        unsigned int pending_fenced_gpu_access:1;
        unsigned int fenced_gpu_access:1;

        unsigned int cache_level:3;

        unsigned int has_aliasing_ppgtt_mapping:1;
        unsigned int has_global_gtt_mapping:1;
        unsigned int has_dma_mapping:1;

        struct sg_table *pages;
        int pages_pin_count;

        /* prime dma-buf support */
        void *dma_buf_vmapping;

        struct intel_ring_buffer *ring;

        /** Breadcrumb of last rendering to the buffer. */
        uint32_t last_read_seqno;
        uint32_t last_write_seqno;
        /** Breadcrumb of last fenced GPU access to the buffer. */
        uint32_t last_fenced_seqno;

        /** Current tiling stride for the object, if it's tiled. */

        /** References from framebuffers, locks out tiling changes. */
        unsigned long framebuffer_references;

        /** Record of address bit 17 of each page at last unbind. */
        unsigned long *bit_17;

        /** User space pin count and filp owning the pin */
        unsigned long user_pin_count;
        struct drm_file *pin_filp;

        /** for phy allocated objects */
        struct drm_i915_gem_phys_object *phys_obj;
};
#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)

#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
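/*
 * Illustrative sketch (not part of the driver): converting between the DRM
 * core object and the i915 wrapper that embeds it:
 *
 *      struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
 *      struct drm_gem_object *base = to_gem_object(obj);
 */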
/**
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable
 * sequence-number comparisons on buffer last_rendering_seqnos, and associate
 * an emission time with seqnos for tracking how far ahead of the GPU we are.
 */
struct drm_i915_gem_request {
        /** On which ring this request was generated */
        struct intel_ring_buffer *ring;

        /** GEM sequence number associated with this request. */

        /** Position in the ringbuffer of the start of the request */

        /** Position in the ringbuffer of the end of the request */

        /** Context related to this request */
        struct i915_hw_context *ctx;

        /** Batch buffer related to this request if any */
        struct drm_i915_gem_object *batch_obj;

        /** Time at which this request was emitted, in jiffies. */
        unsigned long emitted_jiffies;

        /** global list entry for this request */
        struct list_head list;

        struct drm_i915_file_private *file_priv;
        /** file_priv list entry for this request */
        struct list_head client_list;
};
struct drm_i915_file_private {
        struct drm_i915_private *dev_priv;

        struct list_head request_list;
        struct delayed_work idle_work;

        struct idr context_idr;

        struct i915_ctx_hang_stats hang_stats;
        atomic_t rps_wait_boost;
};
#define INTEL_INFO(dev) (to_i915(dev)->info)

#define IS_I830(dev) ((dev)->pdev->device == 0x3577)
#define IS_845G(dev) ((dev)->pdev->device == 0x2562)
#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
#define IS_I865G(dev) ((dev)->pdev->device == 0x2572)
#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
#define IS_I915GM(dev) ((dev)->pdev->device == 0x2592)
#define IS_I945G(dev) ((dev)->pdev->device == 0x2772)
#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
#define IS_GM45(dev) ((dev)->pdev->device == 0x2A42)
#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
#define IS_PINEVIEW_G(dev) ((dev)->pdev->device == 0xa001)
#define IS_PINEVIEW_M(dev) ((dev)->pdev->device == 0xa011)
#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
#define IS_IRONLAKE_M(dev) ((dev)->pdev->device == 0x0046)
#define IS_IVYBRIDGE(dev) (INTEL_INFO(dev)->is_ivybridge)
#define IS_IVB_GT1(dev) ((dev)->pdev->device == 0x0156 || \
                         (dev)->pdev->device == 0x0152 || \
                         (dev)->pdev->device == 0x015a)
#define IS_SNB_GT1(dev) ((dev)->pdev->device == 0x0102 || \
                         (dev)->pdev->device == 0x0106 || \
                         (dev)->pdev->device == 0x010A)
#define IS_VALLEYVIEW(dev) (INTEL_INFO(dev)->is_valleyview)
#define IS_HASWELL(dev) (INTEL_INFO(dev)->is_haswell)
#define IS_BROADWELL(dev) (INTEL_INFO(dev)->gen == 8)
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
                               ((dev)->pdev->device & 0xFF00) == 0x0C00)
#define IS_ULT(dev) (IS_HASWELL(dev) && \
                     ((dev)->pdev->device & 0xFF00) == 0x0A00)
#define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \
                         ((dev)->pdev->device & 0x00F0) == 0x0020)
#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
/*
 * The genX designation typically refers to the render engine, so render
 * capability related checks should use IS_GEN, while display and other checks
 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
 * chips, etc.).
 */
#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2)
#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3)
#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4)
#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
#define IS_GEN7(dev) (INTEL_INFO(dev)->gen == 7)
#define IS_GEN8(dev) (INTEL_INFO(dev)->gen == 8)
#define RENDER_RING (1<<RCS)
#define BSD_RING (1<<VCS)
#define BLT_RING (1<<BCS)
#define VEBOX_RING (1<<VECS)
#define HAS_BSD(dev) (INTEL_INFO(dev)->ring_mask & BSD_RING)
#define HAS_BLT(dev) (INTEL_INFO(dev)->ring_mask & BLT_RING)
#define HAS_VEBOX(dev) (INTEL_INFO(dev)->ring_mask & VEBOX_RING)
#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
#define HAS_WT(dev) (IS_HASWELL(dev) && to_i915(dev)->ellc_size)
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)

#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
#define HAS_ALIASING_PPGTT(dev) (INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev))
#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)

/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev))

/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
 * rows, which changed the alignment requirements and fence programming.
 */
#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
                                                       IS_I915GM(dev)))
#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)

#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)

#define HAS_IPS(dev) (IS_ULT(dev) || IS_BROADWELL(dev))

#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
#define HAS_POWER_WELL(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev))
#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev))
#define HAS_PC8(dev) (IS_HASWELL(dev)) /* XXX HSW:ULX */
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
#define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00
#define INTEL_PCH_LPT_DEVICE_ID_TYPE 0x8c00
#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00

#define INTEL_PCH_TYPE(dev) (to_i915(dev)->pch_type)
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)
/* DPF == dynamic parity feature */
#define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
#define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))

#define GT_FREQUENCY_MULTIPLIER 50

#include "i915_trace.h"
extern const struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
extern unsigned int i915_fbpercrtc __always_unused;
extern int i915_panel_ignore_lid __read_mostly;
extern unsigned int i915_powersave __read_mostly;
extern int i915_semaphores __read_mostly;
extern unsigned int i915_lvds_downclock __read_mostly;
extern int i915_lvds_channel_mode __read_mostly;
extern int i915_panel_use_ssc __read_mostly;
extern int i915_vbt_sdvo_panel_type __read_mostly;
extern int i915_enable_rc6 __read_mostly;
extern int i915_enable_fbc __read_mostly;
extern bool i915_enable_hangcheck __read_mostly;
extern int i915_enable_ppgtt __read_mostly;
extern int i915_enable_psr __read_mostly;
extern unsigned int i915_preliminary_hw_support __read_mostly;
extern int i915_disable_power_well __read_mostly;
extern int i915_enable_ips __read_mostly;
extern bool i915_fastboot __read_mostly;
extern int i915_enable_pc8 __read_mostly;
extern int i915_pc8_timeout __read_mostly;
extern bool i915_prefault_disable __read_mostly;
extern int i915_suspend(struct drm_device *dev, pm_message_t state);
extern int i915_resume(struct drm_device *dev);
extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);

void i915_update_dri1_breadcrumb(struct drm_device *dev);
extern void i915_kernel_lost_context(struct drm_device *dev);
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *);
extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
extern void i915_driver_lastclose(struct drm_device *dev);
extern void i915_driver_preclose(struct drm_device *dev,
                                 struct drm_file *file_priv);
extern void i915_driver_postclose(struct drm_device *dev,
                                  struct drm_file *file_priv);
extern int i915_driver_device_is_agp(struct drm_device *dev);
#ifdef CONFIG_COMPAT
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
                              unsigned long arg);
#endif
extern int i915_emit_box(struct drm_device *dev,
                         struct drm_clip_rect *box,
                         int DR1, int DR4);
extern int intel_gpu_reset(struct drm_device *dev);
extern int i915_reset(struct drm_device *dev);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);

extern void intel_console_resume(struct work_struct *work);
void i915_queue_hangcheck(struct drm_device *dev);
void i915_handle_error(struct drm_device *dev, bool wedged);

extern void intel_irq_init(struct drm_device *dev);
extern void intel_pm_init(struct drm_device *dev);
extern void intel_hpd_init(struct drm_device *dev);
extern void intel_uncore_sanitize(struct drm_device *dev);
extern void intel_uncore_early_sanitize(struct drm_device *dev);
extern void intel_uncore_init(struct drm_device *dev);
extern void intel_uncore_clear_errors(struct drm_device *dev);
extern void intel_uncore_check_errors(struct drm_device *dev);
extern void intel_uncore_fini(struct drm_device *dev);
void
i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask);

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask);
int i915_gem_init_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
int i915_gem_create_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file_priv);
int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file_priv);
int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file_priv);
int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv);
int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv);
int i915_gem_execbuffer(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
int i915_gem_execbuffer2(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv);
int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *file);
int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *file);
int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file_priv);
int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv);
int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv);
int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv);
int i915_gem_set_tiling(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
int i915_gem_get_tiling(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
void *i915_gem_object_alloc(struct drm_device *dev);
void i915_gem_object_free(struct drm_i915_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
                          const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
                                                  size_t size);
void i915_gem_free_object(struct drm_gem_object *obj);
void i915_gem_vma_destroy(struct i915_vma *vma);
int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
                                     struct i915_address_space *vm,
                                     uint32_t alignment,
                                     bool map_and_fenceable,
                                     bool nonblocking);
void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
int __must_check i915_vma_unbind(struct i915_vma *vma);
int __must_check i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj);
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);

int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
{
        struct sg_page_iter sg_iter;

        for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, n)
                return sg_page_iter_page(&sg_iter);

        return NULL;
}
static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
        BUG_ON(obj->pages == NULL);
        obj->pages_pin_count++;
}

static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
        BUG_ON(obj->pages_pin_count == 0);
        obj->pages_pin_count--;
}
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
                         struct intel_ring_buffer *to);
void i915_vma_move_to_active(struct i915_vma *vma,
                             struct intel_ring_buffer *ring);
int i915_gem_dumb_create(struct drm_file *file_priv,
                         struct drm_device *dev,
                         struct drm_mode_create_dumb *args);
int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
                      uint32_t handle, uint64_t *offset);
/**
 * Returns true if seq1 is later than seq2.
 */
static inline bool
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) >= 0;
}
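/*
 * Illustrative note (not in the original header): the signed subtraction
 * keeps the comparison correct across seqno wraparound. With
 * seq1 == 0x00000002 and seq2 == 0xfffffffe, seq1 - seq2 == 4, so seq1 is
 * treated as later even though it is numerically smaller.
 */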
int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
static inline bool
i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
{
	if (obj->fence_reg != I915_FENCE_REG_NONE) {
		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
		dev_priv->fence_regs[obj->fence_reg].pin_count++;
		return true;
	} else
		return false;
}

static inline void
i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
{
	if (obj->fence_reg != I915_FENCE_REG_NONE) {
		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
		WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
		dev_priv->fence_regs[obj->fence_reg].pin_count--;
	}
}
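
/*
 * Illustrative pairing sketch, not driver code: a fence pin only succeeds
 * when the object currently owns a fence register, so callers typically
 * acquire the fence first and balance the pin across the access:
 *
 *	ret = i915_gem_object_get_fence(obj);
 *	if (ret)
 *		return ret;
 *	if (i915_gem_object_pin_fence(obj)) {
 *		... access through the fenced GTT mapping ...
 *		i915_gem_object_unpin_fence(obj);
 *	}
 */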
bool i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
				      bool interruptible);
static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
{
	return unlikely(atomic_read(&error->reset_counter)
			& I915_RESET_IN_PROGRESS_FLAG);
}

static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
{
	return atomic_read(&error->reset_counter) == I915_WEDGED;
}
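
/*
 * Illustrative sketch, not driver code: both predicates read the same
 * atomic reset_counter, so a waiter can distinguish "a reset is pending,
 * back off and retry" from "the GPU is wedged for good, give up":
 *
 *	if (i915_terminally_wedged(&dev_priv->gpu_error))
 *		return -EIO;
 *	if (i915_reset_in_progress(&dev_priv->gpu_error))
 *		return -EAGAIN;
 */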
void i915_gem_reset(struct drm_device *dev);
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
int __must_check i915_gem_init(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice);
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_suspend(struct drm_device *dev);
int __i915_add_request(struct intel_ring_buffer *ring,
		       struct drm_file *file,
		       struct drm_i915_gem_object *batch_obj,
		       u32 *seqno);
#define i915_add_request(ring, seqno) \
	__i915_add_request(ring, NULL, NULL, seqno)
int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
				 uint32_t seqno);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
				  bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     struct intel_ring_buffer *pipelined);
void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
int i915_gem_attach_phys_object(struct drm_device *dev,
				struct drm_i915_gem_object *obj,
				int id,
				int align);
void i915_gem_detach_phys_object(struct drm_device *dev,
				 struct drm_i915_gem_object *obj);
void i915_gem_free_all_phys_object(struct drm_device *dev);
int i915_gem_open(struct drm_device *dev, struct drm_file *file);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);
uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode);
uint32_t
i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
			   int tiling_mode, bool fenced);

int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
				    enum i915_cache_level cache_level);

struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf);

struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				      struct drm_gem_object *gem_obj, int flags);

void i915_gem_restore_fences(struct drm_device *dev);

unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
				  struct i915_address_space *vm);
bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o);
bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
			struct i915_address_space *vm);
unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
				struct i915_address_space *vm);
struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
				     struct i915_address_space *vm);
struct i915_vma *
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
				  struct i915_address_space *vm);

struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj);
/* Some GGTT VM helpers */
#define obj_to_ggtt(obj) \
	(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
static inline bool i915_is_ggtt(struct i915_address_space *vm)
{
	struct i915_address_space *ggtt =
		&((struct drm_i915_private *)(vm)->dev->dev_private)->gtt.base;
	return vm == ggtt;
}

static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_bound(obj, obj_to_ggtt(obj));
}

static inline unsigned long
i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_offset(obj, obj_to_ggtt(obj));
}

static inline unsigned long
i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_size(obj, obj_to_ggtt(obj));
}

static inline int __must_check
i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
		      uint32_t alignment,
		      bool map_and_fenceable,
		      bool nonblocking)
{
	return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment,
				   map_and_fenceable, nonblocking);
}
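
/*
 * Illustrative usage sketch, not driver code: the GGTT helpers reduce the
 * common "bind into the global GTT and read back the offset" pattern to a
 * two-liner, with the address space argument supplied implicitly:
 *
 *	ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
 *	if (ret == 0)
 *		offset = i915_gem_obj_ggtt_offset(obj);
 */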
/* i915_gem_context.c */
void i915_gem_context_init(struct drm_device *dev);
void i915_gem_context_fini(struct drm_device *dev);
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
int i915_switch_context(struct intel_ring_buffer *ring,
			struct drm_file *file, int to_id);
void i915_gem_context_free(struct kref *ctx_ref);
static inline void i915_gem_context_reference(struct i915_hw_context *ctx)
{
	kref_get(&ctx->ref);
}

static inline void i915_gem_context_unreference(struct i915_hw_context *ctx)
{
	kref_put(&ctx->ref, i915_gem_context_free);
}
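
/*
 * Illustrative note: contexts are reference counted through the embedded
 * kref, so i915_gem_context_free() runs only when the last reference
 * drops. A caller that stashes a context pointer pairs the helpers:
 *
 *	i915_gem_context_reference(ctx);
 *	... use ctx beyond the current locking scope ...
 *	i915_gem_context_unreference(ctx);
 */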
struct i915_ctx_hang_stats * __must_check
i915_gem_context_get_hang_stats(struct drm_device *dev,
				struct drm_file *file,
				u32 id);
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file);
/* i915_gem_gtt.c */
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
			    struct drm_i915_gem_object *obj,
			    enum i915_cache_level cache_level);
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
			      struct drm_i915_gem_object *obj);

void i915_check_and_clear_faults(struct drm_device *dev);
void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
void i915_gem_restore_gtt_mappings(struct drm_device *dev);
int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
			      enum i915_cache_level cache_level);
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
void i915_gem_init_global_gtt(struct drm_device *dev);
void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
			       unsigned long mappable_end, unsigned long end);
int i915_gem_gtt_init(struct drm_device *dev);
static inline void i915_gem_chipset_flush(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen < 6)
		intel_gtt_chipset_flush();
}
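
/*
 * Illustrative note (an assumption; buffer names are hypothetical): on
 * pre-gen6 hardware CPU writes can linger in a chipset-level write buffer,
 * so callers flush after writing commands the GPU will read; from gen6
 * onwards the helper compiles to nothing:
 *
 *	memcpy(batch_vaddr, commands, len);
 *	i915_gem_chipset_flush(dev);
 */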
/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev,
					  struct i915_address_space *vm,
					  int min_size,
					  unsigned alignment,
					  unsigned cache_level,
					  bool mappable,
					  bool nonblock);
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
int i915_gem_evict_everything(struct drm_device *dev);
/* i915_gem_stolen.c */
int i915_gem_init_stolen(struct drm_device *dev);
int i915_gem_stolen_setup_compression(struct drm_device *dev, int size);
void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
void i915_gem_cleanup_stolen(struct drm_device *dev);
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
					       u32 stolen_offset,
					       u32 gtt_offset,
					       u32 size);
void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);
/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		obj->tiling_mode != I915_TILING_NONE;
}

void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
/* i915_gem_debug.c */
#if WATCH_LISTS
int i915_verify_lists(struct drm_device *dev);
#else
#define i915_verify_lists(dev) 0
#endif
/* i915_debugfs.c */
int i915_debugfs_init(struct drm_minor *minor);
void i915_debugfs_cleanup(struct drm_minor *minor);
#ifdef CONFIG_DEBUG_FS
void intel_display_crc_init(struct drm_device *dev);
#else
static inline void intel_display_crc_init(struct drm_device *dev) {}
#endif
/* i915_gpu_error.c */
__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
			    const struct i915_error_state_file_priv *error);
int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
			      size_t count, loff_t pos);
static inline void i915_error_state_buf_release(
	struct drm_i915_error_state_buf *eb)
{
	kfree(eb->buf);
}
void i915_capture_error_state(struct drm_device *dev);
void i915_error_state_get(struct drm_device *dev,
			  struct i915_error_state_file_priv *error_priv);
void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
void i915_destroy_error_state(struct drm_device *dev);

void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
const char *i915_cache_level_str(int type);
/* i915_suspend.c */
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);

/* i915_ums.c */
void i915_save_display_reg(struct drm_device *dev);
void i915_restore_display_reg(struct drm_device *dev);

/* i915_sysfs.c */
void i915_setup_sysfs(struct drm_device *dev_priv);
void i915_teardown_sysfs(struct drm_device *dev_priv);
/* intel_i2c.c */
extern int intel_setup_gmbus(struct drm_device *dev);
extern void intel_teardown_gmbus(struct drm_device *dev);
static inline bool intel_gmbus_is_port_valid(unsigned port)
{
	return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
}

extern struct i2c_adapter *intel_gmbus_get_adapter(
		struct drm_i915_private *dev_priv, unsigned port);
extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
{
	return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
}
extern void intel_i2c_reset(struct drm_device *dev);
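
/*
 * Illustrative sketch, not driver code: a GMBUS port is validated before
 * asking for its i2c_adapter, which then works with the generic i2c API:
 *
 *	if (intel_gmbus_is_port_valid(port)) {
 *		struct i2c_adapter *adapter =
 *			intel_gmbus_get_adapter(dev_priv, port);
 *		... i2c_transfer(adapter, msgs, num) ...
 *	}
 */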
/* intel_opregion.c */
struct intel_encoder;
extern int intel_opregion_setup(struct drm_device *dev);
#ifdef CONFIG_ACPI
extern void intel_opregion_init(struct drm_device *dev);
extern void intel_opregion_fini(struct drm_device *dev);
extern void intel_opregion_asle_intr(struct drm_device *dev);
extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
					 bool enable);
extern int intel_opregion_notify_adapter(struct drm_device *dev,
					 pci_power_t state);
#else
static inline void intel_opregion_init(struct drm_device *dev) { return; }
static inline void intel_opregion_fini(struct drm_device *dev) { return; }
static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
static inline int
intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
{
	return 0;
}
static inline int
intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
{
	return 0;
}
#endif

/* intel_acpi.c */
#ifdef CONFIG_ACPI
extern void intel_register_dsm_handler(void);
extern void intel_unregister_dsm_handler(void);
#else
static inline void intel_register_dsm_handler(void) { return; }
static inline void intel_unregister_dsm_handler(void) { return; }
#endif /* CONFIG_ACPI */
/* modesetting */
extern void intel_modeset_init_hw(struct drm_device *dev);
extern void intel_modeset_suspend_hw(struct drm_device *dev);
extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void intel_modeset_setup_hw_state(struct drm_device *dev,
					 bool force_restore);
extern void i915_redisable_vga(struct drm_device *dev);
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void valleyview_set_rps(struct drm_device *dev, u8 val);
extern int valleyview_rps_max_freq(struct drm_i915_private *dev_priv);
extern int valleyview_rps_min_freq(struct drm_i915_private *dev_priv);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
extern int intel_enable_rc6(const struct drm_device *dev);

extern bool i915_semaphore_is_enabled(struct drm_device *dev);
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);
/* overlay */
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
					    struct intel_overlay_error_state *error);

extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
					    struct drm_device *dev,
					    struct intel_display_error_state *error);
/* On SNB platform, before reading ring registers forcewake bit
 * must be set to prevent GT core from power down and stale values being
 * returned.
 */
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
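
/*
 * Illustrative pairing sketch, not driver code: the get/put calls bracket
 * the register accesses so the GT stays powered for the whole sequence
 * and the values read back are current rather than stale:
 *
 *	gen6_gt_force_wake_get(dev_priv);
 *	val = I915_READ(reg);
 *	gen6_gt_force_wake_put(dev_priv);
 */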
int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
/* intel_sideband.c */
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr);
void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val);
u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe,
		    int reg, u32 val);
u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
		   enum intel_sbi_destination destination);
void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
		     enum intel_sbi_destination destination);

int vlv_gpu_freq(int ddr_freq, int val);
int vlv_freq_opcode(int ddr_freq, int val);
#define I915_READ8(reg)		dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
#define I915_WRITE8(reg, val)	dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)

#define I915_READ16(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true)
#define I915_WRITE16(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true)
#define I915_READ16_NOTRACE(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false)
#define I915_WRITE16_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false)

#define I915_READ(reg)		dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true)
#define I915_WRITE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true)
#define I915_READ_NOTRACE(reg)		dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
#define I915_WRITE_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)

#define I915_WRITE64(reg, val)	dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true)
#define I915_READ64(reg)	dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)

#define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)
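
/*
 * Illustrative note: the _NOTRACE variants pass "false" as the trace
 * argument and so skip the register tracepoints, and POSTING_READ() is
 * the usual way to flush a posted MMIO write, e.g.:
 *
 *	I915_WRITE(reg, val);
 *	POSTING_READ(reg);
 */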
2460 /* "Broadcast RGB" property */
2461 #define INTEL_BROADCAST_RGB_AUTO 0
2462 #define INTEL_BROADCAST_RGB_FULL 1
2463 #define INTEL_BROADCAST_RGB_LIMITED 2
static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
{
	if (HAS_PCH_SPLIT(dev))
		return CPU_VGACNTRL;
	else if (IS_VALLEYVIEW(dev))
		return VLV_VGACNTRL;
	else
		return VGACNTRL;
}

static inline void __user *to_user_ptr(u64 address)
{
	return (void __user *)(uintptr_t)address;
}
static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
{
	unsigned long j = msecs_to_jiffies(m);

	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}

static inline unsigned long
timespec_to_jiffies_timeout(const struct timespec *value)
{
	unsigned long j = timespec_to_jiffies(value);

	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}
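
/*
 * Worked example for the "+ 1" above: with HZ = 100 a jiffy is 10 ms, and
 * msecs_to_jiffies(5) rounds up to 1 jiffy; but the first tick may arrive
 * almost immediately, so waiting 1 jiffy could expire after well under
 * 5 ms. Adding one extra jiffy guarantees the timeout is a lower bound,
 * at the cost of possibly waiting up to one tick longer.
 */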