/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRV_H_
#define _I915_DRV_H_

#include <uapi/drm/i915_drm.h>

#include "i915_reg.h"
#include "intel_bios.h"
#include "intel_ringbuffer.h"
#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <drm/intel-gtt.h>
#include <linux/backlight.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
#include <linux/pm_qos.h>

/* General customization:
 */

#define DRIVER_AUTHOR		"Tungsten Graphics, Inc."

#define DRIVER_NAME		"i915"
#define DRIVER_DESC		"Intel Graphics"
#define DRIVER_DATE		"20080730"

enum pipe {
	INVALID_PIPE = -1,
	PIPE_A = 0,
	PIPE_B,
	PIPE_C,
	I915_MAX_PIPES
};
#define pipe_name(p) ((p) + 'A')

enum transcoder {
	TRANSCODER_A = 0,
	TRANSCODER_B,
	TRANSCODER_C,
	TRANSCODER_EDP = 0xF,
};
#define transcoder_name(t) ((t) + 'A')

enum plane {
	PLANE_A = 0,
	PLANE_B,
	PLANE_C,
};
#define plane_name(p) ((p) + 'A')

#define sprite_name(p, s) ((p) * dev_priv->num_plane + (s) + 'A')

enum port {
	PORT_A = 0,
	PORT_B,
	PORT_C,
	PORT_D,
	PORT_E,
	I915_MAX_PORTS
};
#define port_name(p) ((p) + 'A')

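/*
 * Illustrative usage sketch (not part of the original header): the *_name()
 * macros rely on PIPE_A/PLANE_A/PORT_A/... starting at 0, so adding 'A'
 * maps an index to its letter for debug output, e.g.:
 *
 *	DRM_DEBUG_KMS("pipe %c, port %c\n",
 *		      pipe_name(PIPE_B), port_name(PORT_C));
 *
 * prints "pipe B, port C".
 */
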
#define I915_NUM_PHYS_VLV 1

enum dpio_channel {
	DPIO_CH0,
	DPIO_CH1
};

enum dpio_phy {
	DPIO_PHY0,
	DPIO_PHY1
};

enum intel_display_power_domain {
	POWER_DOMAIN_PIPE_A,
	POWER_DOMAIN_PIPE_B,
	POWER_DOMAIN_PIPE_C,
	POWER_DOMAIN_PIPE_A_PANEL_FITTER,
	POWER_DOMAIN_PIPE_B_PANEL_FITTER,
	POWER_DOMAIN_PIPE_C_PANEL_FITTER,
	POWER_DOMAIN_TRANSCODER_A,
	POWER_DOMAIN_TRANSCODER_B,
	POWER_DOMAIN_TRANSCODER_C,
	POWER_DOMAIN_TRANSCODER_EDP,
	POWER_DOMAIN_VGA,
	POWER_DOMAIN_AUDIO,
	POWER_DOMAIN_INIT,

	POWER_DOMAIN_NUM,
};

#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)

#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
		((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
#define POWER_DOMAIN_TRANSCODER(tran) \
	((tran) == TRANSCODER_EDP ? POWER_DOMAIN_TRANSCODER_EDP : \
	 (tran) + POWER_DOMAIN_TRANSCODER_A)

#define HSW_ALWAYS_ON_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PIPE_A) |		\
	BIT(POWER_DOMAIN_TRANSCODER_EDP))
#define BDW_ALWAYS_ON_POWER_DOMAINS (		\
	BIT(POWER_DOMAIN_PIPE_A) |		\
	BIT(POWER_DOMAIN_TRANSCODER_EDP) |	\
	BIT(POWER_DOMAIN_PIPE_A_PANEL_FITTER))

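/*
 * Illustrative sketch (assumption, not driver code): a power domain set is a
 * plain bitmask built with BIT(domain), so membership is a mask test:
 *
 *	unsigned long domains = HSW_ALWAYS_ON_POWER_DOMAINS;
 *
 *	if (domains & BIT(POWER_DOMAIN_TRANSCODER_EDP))
 *		DRM_DEBUG_KMS("eDP transcoder is always on\n");
 *
 * POWER_DOMAIN_MASK covers every valid domain bit below POWER_DOMAIN_NUM.
 */
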
enum hpd_pin {
	HPD_NONE = 0,
	HPD_PORT_A = HPD_NONE, /* PORT_A is internal */
	HPD_TV = HPD_NONE, /* TV is known to be unreliable */
	HPD_CRT,
	HPD_SDVO_B,
	HPD_SDVO_C,
	HPD_PORT_B,
	HPD_PORT_C,
	HPD_PORT_D,
	HPD_NUM_PINS
};

#define I915_GEM_GPU_DOMAINS \
	(I915_GEM_DOMAIN_RENDER | \
	 I915_GEM_DOMAIN_SAMPLER | \
	 I915_GEM_DOMAIN_COMMAND | \
	 I915_GEM_DOMAIN_INSTRUCTION | \
	 I915_GEM_DOMAIN_VERTEX)

#define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++)

#define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
	list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
		if ((intel_encoder)->base.crtc == (__crtc))

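/*
 * Illustrative usage sketch (assumption): both iterators expand to ordinary
 * for/if statements and nest like normal loops. Note that for_each_pipe()
 * expects a variable named "dev" in scope, since it evaluates INTEL_INFO(dev):
 *
 *	enum pipe pipe;
 *	struct intel_encoder *encoder;
 *
 *	for_each_pipe(pipe)
 *		DRM_DEBUG_KMS("found pipe %c\n", pipe_name(pipe));
 *
 *	for_each_encoder_on_crtc(dev, crtc, encoder)
 *		DRM_DEBUG_KMS("encoder attached to this crtc\n");
 */
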
struct drm_i915_private;

enum intel_dpll_id {
	DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */
	/* real shared dpll ids must be >= 0 */
	DPLL_ID_PCH_PLL_A,
	DPLL_ID_PCH_PLL_B,
};
#define I915_NUM_PLLS 2

struct intel_dpll_hw_state {
	uint32_t dpll;
	uint32_t dpll_md;
	uint32_t fp0;
	uint32_t fp1;
};

struct intel_shared_dpll {
	int refcount; /* count of number of CRTCs sharing this PLL */
	int active; /* count of number of active CRTCs (i.e. DPMS on) */
	bool on; /* is the PLL actually active? Disabled during modeset */
	const char *name;
	/* should match the index in the dev_priv->shared_dplls array */
	enum intel_dpll_id id;
	struct intel_dpll_hw_state hw_state;
	void (*mode_set)(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll);
	void (*enable)(struct drm_i915_private *dev_priv,
		       struct intel_shared_dpll *pll);
	void (*disable)(struct drm_i915_private *dev_priv,
			struct intel_shared_dpll *pll);
	bool (*get_hw_state)(struct drm_i915_private *dev_priv,
			     struct intel_shared_dpll *pll,
			     struct intel_dpll_hw_state *hw_state);
};

/* Used by dp and fdi links */
struct intel_link_m_n {
	uint32_t tu;
	uint32_t gmch_m;
	uint32_t gmch_n;
	uint32_t link_m;
	uint32_t link_n;
};

void intel_link_compute_m_n(int bpp, int nlanes,
			    int pixel_clock, int link_clock,
			    struct intel_link_m_n *m_n);

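/*
 * Illustrative call sketch (the values are made up): compute DP M/N dividers
 * for a 24bpp stream over 4 lanes, with a 148500 kHz pixel clock and a
 * 270000 kHz link clock:
 *
 *	struct intel_link_m_n m_n;
 *
 *	intel_link_compute_m_n(24, 4, 148500, 270000, &m_n);
 */
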
struct intel_ddi_plls {
	int spll_refcount;
	int wrpll1_refcount;
	int wrpll2_refcount;
};

/* Interface history:
 *
 * 1.1: Original.
 * 1.2: Add Power Management
 * 1.3: Add vblank support
 * 1.4: Fix cmdbuffer path, add heap destroy
 * 1.5: Add vblank pipe configuration
 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
 *      - Support vertical blank on secondary display pipe
 */
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		6
#define DRIVER_PATCHLEVEL	0

#define WATCH_LISTS	0
#define WATCH_GTT	0

#define I915_GEM_PHYS_CURSOR_0 1
#define I915_GEM_PHYS_CURSOR_1 2
#define I915_GEM_PHYS_OVERLAY_REGS 3
#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)

struct drm_i915_gem_phys_object {
	int id;
	struct page **page_list;
	drm_dma_handle_t *handle;
	struct drm_i915_gem_object *cur_obj;
};

struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
struct opregion_asle;

struct intel_opregion {
	struct opregion_header __iomem *header;
	struct opregion_acpi __iomem *acpi;
	struct opregion_swsci __iomem *swsci;
	u32 swsci_gbda_sub_functions;
	u32 swsci_sbcb_sub_functions;
	struct opregion_asle __iomem *asle;
	void __iomem *vbt;
	u32 __iomem *lid_state;
	struct work_struct asle_work;
};
#define OPREGION_SIZE (8*1024)

struct intel_overlay;
struct intel_overlay_error_state;

struct drm_i915_master_private {
	drm_local_map_t *sarea;
	struct _drm_i915_sarea *sarea_priv;
};
#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 6

struct drm_i915_fence_reg {
	struct list_head lru_list;
	struct drm_i915_gem_object *obj;
	int pin_count;
};

struct sdvo_device_mapping {
	u8 initialized;
	u8 dvo_port;
	u8 slave_addr;
	u8 dvo_wiring;
	u8 i2c_pin;
	u8 ddc_pin;
};

struct intel_display_error_state;

struct drm_i915_error_state {
	struct kref ref;
	u32 eir;
	u32 pgtbl_er;
	u32 ier;
	u32 ccid;
	u32 derrmr;
	u32 forcewake;
	bool waiting[I915_NUM_RINGS];
	u32 pipestat[I915_MAX_PIPES];
	u32 tail[I915_NUM_RINGS];
	u32 head[I915_NUM_RINGS];
	u32 ctl[I915_NUM_RINGS];
	u32 ipeir[I915_NUM_RINGS];
	u32 ipehr[I915_NUM_RINGS];
	u32 instdone[I915_NUM_RINGS];
	u32 acthd[I915_NUM_RINGS];
	u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
	u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1];
	u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */
	/* our own tracking of ring head and tail */
	u32 cpu_ring_head[I915_NUM_RINGS];
	u32 cpu_ring_tail[I915_NUM_RINGS];
	u32 error; /* gen6+ */
	u32 err_int; /* gen7 */
	u32 bbstate[I915_NUM_RINGS];
	u32 instpm[I915_NUM_RINGS];
	u32 instps[I915_NUM_RINGS];
	u32 extra_instdone[I915_NUM_INSTDONE_REG];
	u32 seqno[I915_NUM_RINGS];
	u64 bbaddr[I915_NUM_RINGS];
	u32 fault_reg[I915_NUM_RINGS];
	u32 done_reg;
	u32 faddr[I915_NUM_RINGS];
	u64 fence[I915_MAX_NUM_FENCES];
	struct timeval time;
	struct drm_i915_error_ring {
		bool valid;
		struct drm_i915_error_object {
			int page_count;
			u32 gtt_offset;
			u32 *pages[0];
		} *ringbuffer, *batchbuffer, *ctx;
		struct drm_i915_error_request {
			long jiffies;
			u32 seqno;
			u32 tail;
		} *requests;
		int num_requests;
	} ring[I915_NUM_RINGS];
	struct drm_i915_error_buffer {
		u32 size;
		u32 name;
		u32 rseqno, wseqno;
		u32 gtt_offset;
		u32 read_domains;
		u32 write_domain;
		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
		s32 pinned:2;
		u32 tiling:2;
		u32 dirty:1;
		u32 purgeable:1;
		s32 ring:4;
		u32 cache_level:3;
	} **active_bo, **pinned_bo;
	u32 *active_bo_count, *pinned_bo_count;
	struct intel_overlay_error_state *overlay;
	struct intel_display_error_state *display;
	int hangcheck_score[I915_NUM_RINGS];
	enum intel_ring_hangcheck_action hangcheck_action[I915_NUM_RINGS];
};

struct intel_connector;
struct intel_crtc_config;
struct intel_crtc;
struct intel_limit;
struct dpll;

struct drm_i915_display_funcs {
	bool (*fbc_enabled)(struct drm_device *dev);
	void (*enable_fbc)(struct drm_crtc *crtc);
	void (*disable_fbc)(struct drm_device *dev);
	int (*get_display_clock_speed)(struct drm_device *dev);
	int (*get_fifo_size)(struct drm_device *dev, int plane);
	/**
	 * find_dpll() - Find the best values for the PLL
	 * @limit: limits for the PLL
	 * @crtc: current CRTC
	 * @target: target frequency in kHz
	 * @refclk: reference clock frequency in kHz
	 * @match_clock: if provided, @best_clock P divider must
	 * match the P divider from @match_clock
	 * used for LVDS downclocking
	 * @best_clock: best PLL values found
	 *
	 * Returns true on success, false on failure.
	 */
	bool (*find_dpll)(const struct intel_limit *limit,
			  struct drm_crtc *crtc,
			  int target, int refclk,
			  struct dpll *match_clock,
			  struct dpll *best_clock);
	void (*update_wm)(struct drm_crtc *crtc);
	void (*update_sprite_wm)(struct drm_plane *plane,
				 struct drm_crtc *crtc,
				 uint32_t sprite_width, int pixel_size,
				 bool enable, bool scaled);
	void (*modeset_global_resources)(struct drm_device *dev);
	/* Returns the active state of the crtc, and if the crtc is active,
	 * fills out the pipe-config with the hw state. */
	bool (*get_pipe_config)(struct intel_crtc *,
				struct intel_crtc_config *);
	int (*crtc_mode_set)(struct drm_crtc *crtc,
			     int x, int y,
			     struct drm_framebuffer *old_fb);
	void (*crtc_enable)(struct drm_crtc *crtc);
	void (*crtc_disable)(struct drm_crtc *crtc);
	void (*off)(struct drm_crtc *crtc);
	void (*write_eld)(struct drm_connector *connector,
			  struct drm_crtc *crtc,
			  struct drm_display_mode *mode);
	void (*fdi_link_train)(struct drm_crtc *crtc);
	void (*init_clock_gating)(struct drm_device *dev);
	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
			  struct drm_framebuffer *fb,
			  struct drm_i915_gem_object *obj,
			  uint32_t flags);
	int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			    int x, int y);
	void (*hpd_irq_setup)(struct drm_device *dev);
	/* clock updates for mode set */
	/* cursor updates */
	/* render clock increase/decrease */
	/* display clock increase/decrease */
	/* pll clock increase/decrease */

	int (*setup_backlight)(struct intel_connector *connector);
	uint32_t (*get_backlight)(struct intel_connector *connector);
	void (*set_backlight)(struct intel_connector *connector,
			      uint32_t level);
	void (*disable_backlight)(struct intel_connector *connector);
	void (*enable_backlight)(struct intel_connector *connector);
};

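/*
 * Illustrative sketch (assumption, not part of the original header): these
 * hooks are filled in per platform at init time and invoked through
 * dev_priv->display, with a NULL hook meaning "not implemented here":
 *
 *	if (dev_priv->display.update_wm)
 *		dev_priv->display.update_wm(crtc);
 */
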
struct intel_uncore_funcs {
	void (*force_wake_get)(struct drm_i915_private *dev_priv,
			       int fw_engine);
	void (*force_wake_put)(struct drm_i915_private *dev_priv,
			       int fw_engine);

	uint8_t  (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
	uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
	uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, off_t offset, bool trace);
	uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, off_t offset, bool trace);

	void (*mmio_writeb)(struct drm_i915_private *dev_priv, off_t offset,
			    uint8_t val, bool trace);
	void (*mmio_writew)(struct drm_i915_private *dev_priv, off_t offset,
			    uint16_t val, bool trace);
	void (*mmio_writel)(struct drm_i915_private *dev_priv, off_t offset,
			    uint32_t val, bool trace);
	void (*mmio_writeq)(struct drm_i915_private *dev_priv, off_t offset,
			    uint64_t val, bool trace);
};

struct intel_uncore {
	spinlock_t lock; /** lock is also taken in irq contexts. */

	struct intel_uncore_funcs funcs;

	unsigned fifo_count;
	unsigned forcewake_count;

	unsigned fw_rendercount;
	unsigned fw_mediacount;

	struct delayed_work force_wake_work;
};

#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
	func(is_mobile) sep \
	func(is_i85x) sep \
	func(is_i915g) sep \
	func(is_i945gm) sep \
	func(is_g33) sep \
	func(need_gfx_hws) sep \
	func(is_g4x) sep \
	func(is_pineview) sep \
	func(is_broadwater) sep \
	func(is_crestline) sep \
	func(is_ivybridge) sep \
	func(is_valleyview) sep \
	func(is_haswell) sep \
	func(is_preliminary) sep \
	func(has_fbc) sep \
	func(has_pipe_cxsr) sep \
	func(has_hotplug) sep \
	func(cursor_needs_physical) sep \
	func(has_overlay) sep \
	func(overlay_needs_physical) sep \
	func(supports_tv) sep \
	func(has_llc) sep \
	func(has_ddi) sep \
	func(has_fpga_dbg)

#define DEFINE_FLAG(name) u8 name:1
#define SEP_SEMICOLON ;

struct intel_device_info {
	u32 display_mmio_offset;
	u8 num_pipes:3;
	u8 gen;
	u8 ring_mask; /* Rings supported by the HW */
	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
};

#undef DEFINE_FLAG
#undef SEP_SEMICOLON

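/*
 * Illustrative expansion (assumption): DEV_INFO_FOR_EACH_FLAG() is an
 * x-macro. With the (DEFINE_FLAG, SEP_SEMICOLON) pair above it expands
 * inside struct intel_device_info to
 *
 *	u8 is_mobile:1; u8 is_i85x:1; ... ; u8 has_fpga_dbg:1
 *
 * and the same flag list can be reused with a different (func, sep) pair,
 * e.g. to stringify every flag name without duplicating the list.
 */
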
enum i915_cache_level {
	I915_CACHE_NONE = 0,
	I915_CACHE_LLC, /* also used for snoopable memory on non-LLC */
	I915_CACHE_L3_LLC, /* gen7+, L3 sits between the domain specific
			      caches, eg sampler/render caches, and the
			      large Last-Level-Cache. LLC is coherent with
			      the CPU, but L3 is only visible to the GPU. */
	I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */
};

typedef uint32_t gen6_gtt_pte_t;

struct i915_address_space {
	struct drm_mm mm;
	struct drm_device *dev;
	struct list_head global_link;
	unsigned long start;	/* Start offset always 0 for dri2 */
	size_t total;		/* size addr space maps (ex. 2GB for ggtt) */

	struct {
		dma_addr_t addr;
		struct page *page;
	} scratch;

	/**
	 * List of objects currently involved in rendering.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * LRU list of objects which are not in the ringbuffer and
	 * are ready to unbind, but are still in the GTT.
	 *
	 * last_rendering_seqno is 0 while an object is in this list.
	 *
	 * A reference is not held on the buffer while on this list,
	 * as merely being GTT-bound shouldn't prevent its being
	 * freed, and we'll pull it off the list in the free path.
	 */
	struct list_head inactive_list;

	/* FIXME: Need a more generic return type */
	gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
				     enum i915_cache_level level,
				     bool valid); /* Create a valid PTE */
	void (*clear_range)(struct i915_address_space *vm,
			    unsigned int first_entry,
			    unsigned int num_entries,
			    bool use_scratch);
	void (*insert_entries)(struct i915_address_space *vm,
			       struct sg_table *st,
			       unsigned int first_entry,
			       enum i915_cache_level cache_level);
	void (*cleanup)(struct i915_address_space *vm);
};

/* The Graphics Translation Table is the way in which GEN hardware translates a
 * Graphics Virtual Address into a Physical Address. In addition to the normal
 * collateral associated with any va->pa translations GEN hardware also has a
 * portion of the GTT which can be mapped by the CPU and remain both coherent
 * and correct (in cases like swizzling). That region is referred to as GMADR in
 * the spec.
 */
struct i915_gtt {
	struct i915_address_space base;
	size_t stolen_size;		/* Total size of stolen memory */

	unsigned long mappable_end;	/* End offset that we can CPU map */
	struct io_mapping *mappable;	/* Mapping to our CPU mappable region */
	phys_addr_t mappable_base;	/* PA of our GMADR */

	/** "Graphics Stolen Memory" holds the global PTEs */
	void __iomem *gsm;

	bool do_idle_maps;

	int mtrr;

	/* global gtt ops */
	int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
			 size_t *stolen, phys_addr_t *mappable_base,
			 unsigned long *mappable_end);
};
#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT)

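/*
 * Worked example (illustrative): with a 2 GiB global GTT and 4 KiB pages
 * (PAGE_SHIFT == 12), gtt_total_entries() yields
 *
 *	(2UL << 30) >> 12 == 524288 global PTEs.
 */
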
struct i915_hw_ppgtt {
	struct i915_address_space base;
	unsigned num_pd_entries;
	union {
		struct page **pt_pages;
		struct page *gen8_pt_pages;
	};
	struct page *pd_pages;
	int num_pd_pages;
	int num_pt_pages;
	union {
		uint32_t pd_offset;
		dma_addr_t pd_dma_addr[4];
	};
	union {
		dma_addr_t *pt_dma_addr;
		dma_addr_t *gen8_pt_dma_addr[4];
	};
	int (*enable)(struct drm_device *dev);
};

/**
 * A VMA represents a GEM BO that is bound into an address space. Therefore, a
 * VMA's presence cannot be guaranteed before binding, or after unbinding the
 * object into/from the address space.
 *
 * To make things as simple as possible (ie. no refcounting), a VMA's lifetime
 * will always be <= an object's lifetime. So object refcounting should cover us.
 */
struct i915_vma {
	struct drm_mm_node node;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm;

	/** This object's place on the active/inactive lists */
	struct list_head mm_list;

	struct list_head vma_link; /* Link in the object's VMA list */

	/** This vma's place in the batchbuffer or on the eviction list */
	struct list_head exec_list;

	/**
	 * Used for performing relocations during execbuffer insertion.
	 */
	struct hlist_node exec_node;
	unsigned long exec_handle;
	struct drm_i915_gem_exec_object2 *exec_entry;
};

struct i915_ctx_hang_stats {
	/* This context had batch pending when hang was declared */
	unsigned batch_pending;

	/* This context had batch active when hang was declared */
	unsigned batch_active;

	/* Time when this context was last blamed for a GPU reset */
	unsigned long guilty_ts;

	/* This context is banned from submitting more work */
	bool banned;
};

/* This must match up with the value previously used for execbuf2.rsvd1. */
#define DEFAULT_CONTEXT_ID 0
struct i915_hw_context {
	struct kref ref;
	int id;
	bool is_initialized;
	uint8_t remap_slice;
	struct drm_i915_file_private *file_priv;
	struct intel_ring_buffer *ring;
	struct drm_i915_gem_object *obj;
	struct i915_ctx_hang_stats hang_stats;

	struct list_head link;
};

struct i915_fbc {
	unsigned long size;
	unsigned int fb_id;
	enum plane plane;
	int y;

	struct drm_mm_node *compressed_fb;
	struct drm_mm_node *compressed_llb;

	struct intel_fbc_work {
		struct delayed_work work;
		struct drm_crtc *crtc;
		struct drm_framebuffer *fb;
	} *fbc_work;

	enum no_fbc_reason {
		FBC_OK, /* FBC is enabled */
		FBC_UNSUPPORTED, /* FBC is not supported by this chipset */
		FBC_NO_OUTPUT, /* no outputs enabled to compress */
		FBC_STOLEN_TOO_SMALL, /* not enough space for buffers */
		FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
		FBC_MODE_TOO_LARGE, /* mode too large for compression */
		FBC_BAD_PLANE, /* fbc not supported on plane */
		FBC_NOT_TILED, /* buffer not tiled */
		FBC_MULTIPLE_PIPES, /* more than one pipe active */
		FBC_MODULE_PARAM,
		FBC_CHIP_DEFAULT, /* disabled by default on this chip */
	} no_fbc_reason;
};

struct i915_psr {
	bool sink_support;
	bool source_ok;
};

enum intel_pch {
	PCH_NONE = 0,	/* No PCH present */
	PCH_IBX,	/* Ibexpeak PCH */
	PCH_CPT,	/* Cougarpoint PCH */
	PCH_LPT,	/* Lynxpoint PCH */
	PCH_NOP,
};

enum intel_sbi_destination {
	SBI_ICLK,
	SBI_MPHY,
};

#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)

struct intel_fbdev;
struct intel_fbc_work;

struct intel_gmbus {
	struct i2c_adapter adapter;
	u32 force_bit;
	u32 reg0;
	u32 gpio_reg;
	struct i2c_algo_bit_data bit_algo;
	struct drm_i915_private *dev_priv;
};

struct i915_suspend_saved_registers {
	u8 saveLBB;
	u32 saveDSPACNTR;
	u32 saveDSPBCNTR;
	u32 saveDSPARB;
	u32 savePIPEACONF;
	u32 savePIPEBCONF;
	u32 savePIPEASRC;
	u32 savePIPEBSRC;
	u32 saveFPA0;
	u32 saveFPA1;
	u32 saveDPLL_A;
	u32 saveDPLL_A_MD;
	u32 saveHTOTAL_A;
	u32 saveHBLANK_A;
	u32 saveHSYNC_A;
	u32 saveVTOTAL_A;
	u32 saveVBLANK_A;
	u32 saveVSYNC_A;
	u32 saveBCLRPAT_A;
	u32 saveTRANSACONF;
	u32 saveTRANS_HTOTAL_A;
	u32 saveTRANS_HBLANK_A;
	u32 saveTRANS_HSYNC_A;
	u32 saveTRANS_VTOTAL_A;
	u32 saveTRANS_VBLANK_A;
	u32 saveTRANS_VSYNC_A;
	u32 savePIPEASTAT;
	u32 saveDSPASTRIDE;
	u32 saveDSPASIZE;
	u32 saveDSPAPOS;
	u32 saveDSPAADDR;
	u32 saveDSPASURF;
	u32 saveDSPATILEOFF;
	u32 savePFIT_PGM_RATIOS;
	u32 saveBLC_HIST_CTL;
	u32 saveBLC_PWM_CTL;
	u32 saveBLC_PWM_CTL2;
	u32 saveBLC_HIST_CTL_B;
	u32 saveBLC_CPU_PWM_CTL;
	u32 saveBLC_CPU_PWM_CTL2;
	u32 saveFPB0;
	u32 saveFPB1;
	u32 saveDPLL_B;
	u32 saveDPLL_B_MD;
	u32 saveHTOTAL_B;
	u32 saveHBLANK_B;
	u32 saveHSYNC_B;
	u32 saveVTOTAL_B;
	u32 saveVBLANK_B;
	u32 saveVSYNC_B;
	u32 saveBCLRPAT_B;
	u32 saveTRANSBCONF;
	u32 saveTRANS_HTOTAL_B;
	u32 saveTRANS_HBLANK_B;
	u32 saveTRANS_HSYNC_B;
	u32 saveTRANS_VTOTAL_B;
	u32 saveTRANS_VBLANK_B;
	u32 saveTRANS_VSYNC_B;
	u32 savePIPEBSTAT;
	u32 saveDSPBSTRIDE;
	u32 saveDSPBSIZE;
	u32 saveDSPBPOS;
	u32 saveDSPBADDR;
	u32 saveDSPBSURF;
	u32 saveDSPBTILEOFF;
	u32 saveVGA0;
	u32 saveVGA1;
	u32 saveVGA_PD;
	u32 saveVGACNTRL;
	u32 saveADPA;
	u32 saveLVDS;
	u32 savePP_ON_DELAYS;
	u32 savePP_OFF_DELAYS;
	u32 saveDVOA;
	u32 saveDVOB;
	u32 saveDVOC;
	u32 savePP_ON;
	u32 savePP_OFF;
	u32 savePP_CONTROL;
	u32 savePP_DIVISOR;
	u32 savePFIT_CONTROL;
	u32 save_palette_a[256];
	u32 save_palette_b[256];
	u32 saveDPFC_CB_BASE;
	u32 saveFBC_CFB_BASE;
	u32 saveFBC_LL_BASE;
	u32 saveFBC_CONTROL;
	u32 saveFBC_CONTROL2;
	u32 saveIER;
	u32 saveIIR;
	u32 saveIMR;
	u32 saveDEIER;
	u32 saveDEIMR;
	u32 saveGTIER;
	u32 saveGTIMR;
	u32 saveFDI_RXA_IMR;
	u32 saveFDI_RXB_IMR;
	u32 saveCACHE_MODE_0;
	u32 saveMI_ARB_STATE;
	u32 saveSWF0[16];
	u32 saveSWF1[16];
	u32 saveSWF2[3];
	u8 saveMSR;
	u8 saveSR[8];
	u8 saveGR[25];
	u8 saveAR_INDEX;
	u8 saveAR[21];
	u8 saveDACMASK;
	u8 saveCR[37];
	uint64_t saveFENCE[I915_MAX_NUM_FENCES];
	u32 saveCURACNTR;
	u32 saveCURAPOS;
	u32 saveCURABASE;
	u32 saveCURBCNTR;
	u32 saveCURBPOS;
	u32 saveCURBBASE;
	u32 saveCURSIZE;
	u32 saveDP_B;
	u32 saveDP_C;
	u32 saveDP_D;
	u32 savePIPEA_GMCH_DATA_M;
	u32 savePIPEB_GMCH_DATA_M;
	u32 savePIPEA_GMCH_DATA_N;
	u32 savePIPEB_GMCH_DATA_N;
	u32 savePIPEA_DP_LINK_M;
	u32 savePIPEB_DP_LINK_M;
	u32 savePIPEA_DP_LINK_N;
	u32 savePIPEB_DP_LINK_N;
	u32 saveFDI_RXA_CTL;
	u32 saveFDI_TXA_CTL;
	u32 saveFDI_RXB_CTL;
	u32 saveFDI_TXB_CTL;
	u32 savePFA_CTL_1;
	u32 savePFB_CTL_1;
	u32 savePFA_WIN_SZ;
	u32 savePFB_WIN_SZ;
	u32 savePFA_WIN_POS;
	u32 savePFB_WIN_POS;
	u32 savePCH_DREF_CONTROL;
	u32 saveDISP_ARB_CTL;
	u32 savePIPEA_DATA_M1;
	u32 savePIPEA_DATA_N1;
	u32 savePIPEA_LINK_M1;
	u32 savePIPEA_LINK_N1;
	u32 savePIPEB_DATA_M1;
	u32 savePIPEB_DATA_N1;
	u32 savePIPEB_LINK_M1;
	u32 savePIPEB_LINK_N1;
	u32 saveMCHBAR_RENDER_STANDBY;
	u32 savePCH_PORT_HOTPLUG;
};

struct intel_gen6_power_mgmt {
	/* work and pm_iir are protected by dev_priv->irq_lock */
	struct work_struct work;
	u32 pm_iir;

	/* The below variables and all the rps hw state are protected by
	 * dev->struct_mutex. */
	u8 cur_delay;
	u8 min_delay;
	u8 max_delay;
	u8 rpe_delay;
	u8 rp1_delay;
	u8 rp0_delay;
	u8 hw_max;

	int last_adj;
	enum { LOW_POWER, BETWEEN, HIGH_POWER } power;

	bool enabled;
	struct delayed_work delayed_resume_work;

	/*
	 * Protects RPS/RC6 register access and PCU communication.
	 * Must be taken after struct_mutex if nested.
	 */
	struct mutex hw_lock;
};

/* defined in intel_pm.c */
extern spinlock_t mchdev_lock;

struct intel_ilk_power_mgmt {
	u8 cur_delay;
	u8 min_delay;
	u8 max_delay;
	u8 fmax;
	u8 fstart;

	u64 last_count1;
	unsigned long last_time1;
	unsigned long chipset_power;
	u64 last_count2;
	struct timespec last_time2;
	unsigned long gfx_power;
	u8 corr;

	int c_m;
	int r_t;

	struct drm_i915_gem_object *pwrctx;
	struct drm_i915_gem_object *renderctx;
};

/* Power well structure for haswell */
struct i915_power_well {
	const char *name;
	bool always_on;
	/* power well enable/disable usage count */
	int count;
	unsigned long domains;
	void *data;
	void (*set)(struct drm_device *dev, struct i915_power_well *power_well,
		    bool enable);
	bool (*is_enabled)(struct drm_device *dev,
			   struct i915_power_well *power_well);
};

struct i915_power_domains {
	/*
	 * Power wells needed for initialization at driver init and suspend
	 * time are on. They are kept on until after the first modeset.
	 */
	bool init_power_on;
	int power_well_count;

	struct mutex lock;
	int domain_use_count[POWER_DOMAIN_NUM];
	struct i915_power_well *power_wells;
};

struct i915_dri1_state {
	unsigned allow_batchbuffer : 1;
	u32 __iomem *gfx_hws_cpu_addr;

	unsigned int cpp;
	int back_offset;
	int front_offset;
	int current_page;
	int page_flipping;

	uint32_t counter;
};

struct i915_ums_state {
	/**
	 * Flag if the X Server, and thus DRM, is not currently in
	 * control of the device.
	 *
	 * This is set between LeaveVT and EnterVT. It needs to be
	 * replaced with a semaphore. It also needs to be
	 * transitioned away from for kernel modesetting.
	 */
	int mm_suspended;
};

#define MAX_L3_SLICES 2
struct intel_l3_parity {
	u32 *remap_info[MAX_L3_SLICES];
	struct work_struct error_work;
	int which_slice;
};

struct i915_gem_mm {
	/** Memory allocator for GTT stolen memory */
	struct drm_mm stolen;
	/** List of all objects in gtt_space. Used to restore gtt
	 *  mappings on resume */
	struct list_head bound_list;
	/**
	 * List of objects which are not bound to the GTT (thus
	 * are idle and not used by the GPU) but still have
	 * (presumably uncached) pages still attached.
	 */
	struct list_head unbound_list;

	/** Usable portion of the GTT for GEM */
	unsigned long stolen_base; /* limited to low memory (32-bit) */

	/** PPGTT used for aliasing the PPGTT with the GTT */
	struct i915_hw_ppgtt *aliasing_ppgtt;

	struct shrinker inactive_shrinker;
	bool shrinker_no_lock_stealing;

	/** LRU list of objects with fence regs on them. */
	struct list_head fence_list;

	/**
	 * We leave the user IRQ off as much as possible,
	 * but this means that requests will finish and never
	 * be retired once the system goes idle. Set a timer to
	 * fire periodically while the ring is running. When it
	 * fires, go retire requests.
	 */
	struct delayed_work retire_work;

	/**
	 * When we detect an idle GPU, we want to turn on
	 * powersaving features. So once we see that there
	 * are no more requests outstanding and no more
	 * arrive within a small period of time, we fire
	 * off the idle_work.
	 */
	struct delayed_work idle_work;

	/**
	 * Are we in a non-interruptible section of code like
	 * modesetting?
	 */
	bool interruptible;

	/** Bit 6 swizzling required for X tiling */
	uint32_t bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
	uint32_t bit_6_swizzle_y;

	/* storage for physical objects */
	struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];

	/* accounting, useful for userland debugging */
	spinlock_t object_stat_lock;
	size_t object_memory;
	u32 object_count;
};

struct drm_i915_error_state_buf {
	unsigned bytes;
	unsigned size;
	int err;
	u8 *buf;
	loff_t start;
	loff_t pos;
};

struct i915_error_state_file_priv {
	struct drm_device *dev;
	struct drm_i915_error_state *error;
};

struct i915_gpu_error {
	/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
	/* Hang gpu twice in this window and your context gets banned */
#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000)

	struct timer_list hangcheck_timer;

	/* For reset and error_state handling. */
	spinlock_t lock;
	/* Protected by the above dev->gpu_error.lock. */
	struct drm_i915_error_state *first_error;
	struct work_struct work;

	unsigned long missed_irq_rings;

	/**
	 * State variable controlling the reset flow and count
	 *
	 * This is a counter which gets incremented when reset is triggered,
	 * and again when reset has been handled. So odd values (lowest bit
	 * set) mean that reset is in progress, and an even value means that
	 * the (reset_counter >> 1):th reset was successfully completed.
	 *
	 * If reset is not completed successfully, the I915_WEDGED bit is
	 * set meaning that hardware is terminally sour and there is no
	 * recovery. All waiters on the reset_queue will be woken when
	 * that happens.
	 *
	 * This counter is used by the wait_seqno code to notice that reset
	 * event happened and it needs to restart the entire ioctl (since most
	 * likely the seqno it waited for won't ever signal anytime soon).
	 *
	 * This is important for lock-free wait paths, where no contended lock
	 * naturally enforces the correct ordering between the bail-out of the
	 * waiter and the gpu reset work code.
	 */
	atomic_t reset_counter;

#define I915_RESET_IN_PROGRESS_FLAG	1
#define I915_WEDGED			(1 << 31)

	/**
	 * Waitqueue to signal when the reset has completed. Used by clients
	 * that wait for dev_priv->mm.wedged to settle.
	 */
	wait_queue_head_t reset_queue;

	/* For gpu hang simulation. */
	unsigned int stop_rings;

	/* For missed irq/seqno simulation. */
	unsigned int test_irq_rings;
};

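/*
 * Illustrative sketch (assumption, mirroring the reset_counter comment
 * above): the counter can be decoded without holding any lock:
 *
 *	unsigned int reset = atomic_read(&dev_priv->gpu_error.reset_counter);
 *	bool in_progress = reset & I915_RESET_IN_PROGRESS_FLAG;
 *	bool wedged = reset & I915_WEDGED;
 *	unsigned int resets_done = (reset & ~I915_WEDGED) >> 1;
 */
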
enum modeset_restore {
	MODESET_ON_LID_OPEN,
	MODESET_DONE,
	MODESET_SUSPENDED,
};

struct ddi_vbt_port_info {
	uint8_t hdmi_level_shift;

	uint8_t supports_dvi:1;
	uint8_t supports_hdmi:1;
	uint8_t supports_dp:1;
};

struct intel_vbt_data {
	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */

	/* Feature bits */
	unsigned int int_tv_support:1;
	unsigned int lvds_dither:1;
	unsigned int lvds_vbt:1;
	unsigned int int_crt_support:1;
	unsigned int lvds_use_ssc:1;
	unsigned int display_clock_mode:1;
	unsigned int fdi_rx_polarity_inverted:1;
	int lvds_ssc_freq;
	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */

	/* eDP */
	int edp_rate;
	int edp_lanes;
	int edp_preemphasis;
	int edp_vswing;
	bool edp_initialized;
	bool edp_support;
	int edp_bpp;
	struct edp_power_seq edp_pps;

	struct {
		u16 pwm_freq_hz;
		bool active_low_pwm;
	} backlight;

	/* MIPI DSI */
	struct {
		u16 panel_id;
	} dsi;

	int crt_ddc_pin;

	int child_dev_num;
	union child_device_config *child_dev;

	struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];
};

enum intel_ddb_partitioning {
	INTEL_DDB_PART_1_2,
	INTEL_DDB_PART_5_6, /* IVB+ */
};

struct intel_wm_level {
	bool enable;
	uint32_t pri_val;
	uint32_t spr_val;
	uint32_t cur_val;
	uint32_t fbc_val;
};

struct ilk_wm_values {
	uint32_t wm_pipe[3];
	uint32_t wm_lp[3];
	uint32_t wm_lp_spr[3];
	uint32_t wm_linetime[3];
	bool enable_fbc_wm;
	enum intel_ddb_partitioning partitioning;
};

/*
 * This struct tracks the state needed for the Package C8+ feature.
 *
 * Package states C8 and deeper are really deep PC states that can only be
 * reached when all the devices on the system allow it, so even if the graphics
 * device allows PC8+, it doesn't mean the system will actually get to these
 * states.
 *
 * Our driver only allows PC8+ when all the outputs are disabled, the power well
 * is disabled and the GPU is idle. When these conditions are met, we manually
 * do the other conditions: disable the interrupts, clocks and switch LCPLL
 * refclk to Fclk.
 *
 * When we really reach PC8 or deeper states (not just when we allow it) we lose
 * the state of some registers, so when we come back from PC8+ we need to
 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
 * need to take care of the registers kept by RC6.
 *
 * The interrupt disabling is part of the requirements. We can only leave the
 * PCH HPD interrupts enabled. If we're in PC8+ and we get another interrupt we
 * can lock the machine.
 *
 * Ideally every piece of our code that needs PC8+ disabled would call
 * hsw_disable_package_c8, which would increment disable_count and prevent the
 * system from reaching PC8+. But we don't have a symmetric way to do this for
 * everything, so we have the requirements_met and gpu_idle variables. When we
 * switch requirements_met or gpu_idle to true we decrease disable_count, and
 * increase it in the opposite case. The requirements_met variable is true when
 * all the CRTCs, encoders and the power well are disabled. The gpu_idle
 * variable is true when the GPU is idle.
 *
 * In addition to everything, we only actually enable PC8+ if disable_count
 * stays at zero for at least some seconds. This is implemented with the
 * enable_work variable. We do this so we don't enable/disable PC8 dozens of
 * consecutive times when all screens are disabled and some background app
 * queries the state of our connectors, or we have some application constantly
 * waking up to use the GPU. Only after the enable_work function actually
 * enables PC8+ the "enable" variable will become true, which means that it can
 * be false even if disable_count is 0.
 *
 * The irqs_disabled variable becomes true exactly after we disable the IRQs and
 * goes back to false exactly before we reenable the IRQs. We use this variable
 * to check if someone is trying to enable/disable IRQs while they're supposed
 * to be disabled. This shouldn't happen and we'll print some error messages in
 * case it happens, but if it actually happens we'll also update the variables
 * inside struct regsave so when we restore the IRQs they will contain the
 * latest expected values.
 *
 * For more, read "Display Sequences for Package C8" on our documentation.
 */
struct i915_package_c8 {
	bool requirements_met;
	bool gpu_idle;
	bool irqs_disabled;
	/* Only true after the delayed work task actually enables it. */
	bool enabled;
	int disable_count;
	struct mutex lock;
	struct delayed_work enable_work;

	struct {
		uint32_t deimr;
		uint32_t sdeimr;
		uint32_t gtimr;
		uint32_t gtier;
		uint32_t gen6_pmimr;
	} regsave;
};

struct i915_runtime_pm {
	bool suspended;
};

enum intel_pipe_crc_source {
	INTEL_PIPE_CRC_SOURCE_NONE,
	INTEL_PIPE_CRC_SOURCE_PLANE1,
	INTEL_PIPE_CRC_SOURCE_PLANE2,
	INTEL_PIPE_CRC_SOURCE_PF,
	INTEL_PIPE_CRC_SOURCE_PIPE,
	/* TV/DP on pre-gen5/vlv can't use the pipe source. */
	INTEL_PIPE_CRC_SOURCE_TV,
	INTEL_PIPE_CRC_SOURCE_DP_B,
	INTEL_PIPE_CRC_SOURCE_DP_C,
	INTEL_PIPE_CRC_SOURCE_DP_D,
	INTEL_PIPE_CRC_SOURCE_AUTO,
	INTEL_PIPE_CRC_SOURCE_MAX,
};

struct intel_pipe_crc_entry {
	uint32_t frame;
	uint32_t crc[5];
};

#define INTEL_PIPE_CRC_ENTRIES_NR	128
struct intel_pipe_crc {
	spinlock_t lock;
	bool opened;		/* exclusive access to the result file */
	struct intel_pipe_crc_entry *entries;
	enum intel_pipe_crc_source source;
	int head, tail;
	wait_queue_head_t wq;
};

typedef struct drm_i915_private {
	struct drm_device *dev;
	struct kmem_cache *slab;

	const struct intel_device_info *info;

	int relative_constants_mode;

	void __iomem *regs;

	struct intel_uncore uncore;

	struct intel_gmbus gmbus[GMBUS_NUM_PORTS];

	/** gmbus_mutex protects against concurrent usage of the single hw gmbus
	 * controller on different i2c buses. */
	struct mutex gmbus_mutex;

	/**
	 * Base address of the gmbus and gpio block.
	 */
	uint32_t gpio_mmio_base;

	wait_queue_head_t gmbus_wait_queue;

	struct pci_dev *bridge_dev;
	struct intel_ring_buffer ring[I915_NUM_RINGS];
	uint32_t last_seqno, next_seqno;

	drm_dma_handle_t *status_page_dmah;
	struct resource mch_res;

	atomic_t irq_received;

	/* protects the irq masks */
	spinlock_t irq_lock;

	/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
	struct pm_qos_request pm_qos;

	/* DPIO indirect register protection */
	struct mutex dpio_lock;

	/** Cached value of IMR to avoid reads in updating the bitfield */
	union {
		u32 irq_mask;
		u32 de_irq_mask[I915_MAX_PIPES];
	};
	u32 gt_irq_mask;
	u32 pm_irq_mask;

	struct work_struct hotplug_work;
	bool enable_hotplug_processing;
	struct {
		unsigned long hpd_last_jiffies;
		int hpd_cnt;
		enum {
			HPD_ENABLED = 0,
			HPD_DISABLED = 1,
			HPD_MARK_DISABLED = 2
		} hpd_mark;
	} hpd_stats[HPD_NUM_PINS];
	u32 hpd_event_bits;
	struct timer_list hotplug_reenable_timer;

	int num_plane;

	struct i915_fbc fbc;
	struct intel_opregion opregion;
	struct intel_vbt_data vbt;

	/* overlay */
	struct intel_overlay *overlay;

	/* backlight registers and fields in struct intel_panel */
	spinlock_t backlight_lock;

	/* LVDS info */
	bool no_aux_handshake;

	struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
	int num_fence_regs; /* 8 on pre-965, 16 otherwise */

	unsigned int fsb_freq, mem_freq, is_ddr3;

	/**
	 * wq - Driver workqueue for GEM.
	 *
	 * NOTE: Work items scheduled here are not allowed to grab any modeset
	 * locks, for otherwise the flushing done in the pageflip code will
	 * result in deadlocks.
	 */
	struct workqueue_struct *wq;

	/* Display functions */
	struct drm_i915_display_funcs display;

	/* PCH chipset type */
	enum intel_pch pch_type;
	unsigned short pch_id;

	unsigned long quirks;

	enum modeset_restore modeset_restore;
	struct mutex modeset_restore_lock;

	struct list_head vm_list; /* Global list of all address spaces */
	struct i915_gtt gtt; /* VMA representing the global address space */

	struct i915_gem_mm mm;

	/* Kernel Modesetting */

	struct sdvo_device_mapping sdvo_mappings[2];

	struct drm_crtc *plane_to_crtc_mapping[3];
	struct drm_crtc *pipe_to_crtc_mapping[3];
	wait_queue_head_t pending_flip_queue;

#ifdef CONFIG_DEBUG_FS
	struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
#endif

	int num_shared_dpll;
	struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
	struct intel_ddi_plls ddi_plls;
	int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];

	/* Reclocking support */
	bool render_reclock_avail;
	bool lvds_downclock_avail;
	/* indicates the reduced downclock for LVDS */
	int lvds_downclock;
	u16 orig_clock;

	bool mchbar_need_disable;

	struct intel_l3_parity l3_parity;

	/* Cannot be determined by PCIID. You must always read a register. */
	size_t ellc_size;

	/* gen6+ rps state */
	struct intel_gen6_power_mgmt rps;

	/* ilk-only ips/rps state. Everything in here is protected by the global
	 * mchdev_lock in intel_pm.c */
	struct intel_ilk_power_mgmt ips;

	struct i915_power_domains power_domains;

	struct i915_psr psr;

	struct i915_gpu_error gpu_error;

	struct drm_i915_gem_object *vlv_pctx;

#ifdef CONFIG_DRM_I915_FBDEV
	/* list of fbdev register on this device */
	struct intel_fbdev *fbdev;
#endif

	/*
	 * The console may be contended at resume, but we don't
	 * want it to block on it.
	 */
	struct work_struct console_resume_work;

	struct drm_property *broadcast_rgb_property;
	struct drm_property *force_audio_property;

	uint32_t hw_context_size;
	struct list_head context_list;

	u32 fdi_rx_config;

	struct i915_suspend_saved_registers regfile;

	struct {
		/*
		 * Raw watermark latency values:
		 * in 0.1us units for WM0,
		 * in 0.5us units for WM1+.
		 */
		/* primary */
		uint16_t pri_latency[5];
		/* sprite */
		uint16_t spr_latency[5];
		/* cursor */
		uint16_t cur_latency[5];

		/* current hardware state */
		struct ilk_wm_values hw;
	} wm;

	struct i915_package_c8 pc8;

	struct i915_runtime_pm pm;

	/* Old dri1 support infrastructure, beware the dragons ya fools entering
	 * here! */
	struct i915_dri1_state dri1;
	/* Old ums support infrastructure, same warning applies. */
	struct i915_ums_state ums;
} drm_i915_private_t;

static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
{
	return dev->dev_private;
}

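/*
 * Illustrative usage sketch (assumption): to_i915() is the typed accessor
 * for the driver-private state hanging off a struct drm_device:
 *
 *	struct drm_i915_private *dev_priv = to_i915(dev);
 */
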
/* Iterate over initialised rings */
#define for_each_ring(ring__, dev_priv__, i__) \
	for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
		if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))

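/*
 * Illustrative usage sketch (assumption): the trailing if-clause skips rings
 * the hardware doesn't have, so the loop body only sees initialised rings:
 *
 *	struct intel_ring_buffer *ring;
 *	int i;
 *
 *	for_each_ring(ring, dev_priv, i)
 *		DRM_DEBUG_DRIVER("ring %d is initialised\n", i);
 */
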
enum hdmi_force_audio {
	HDMI_AUDIO_OFF_DVI = -2,	/* no aux data for HDMI-DVI converter */
	HDMI_AUDIO_OFF,			/* force turn off HDMI audio */
	HDMI_AUDIO_AUTO,		/* trust EDID */
	HDMI_AUDIO_ON,			/* force turn on HDMI audio */
};

#define I915_GTT_OFFSET_NONE ((u32)-1)

struct drm_i915_gem_object_ops {
	/* Interface between the GEM object and its backing storage.
	 * get_pages() is called once prior to the use of the associated set
	 * of pages, before binding them into the GTT, and put_pages() is
	 * called after we no longer need them. As we expect there to be
	 * associated cost with migrating pages between the backing storage
	 * and making them available for the GPU (e.g. clflush), we may hold
	 * onto the pages after they are no longer referenced by the GPU
	 * in case they may be used again shortly (for example migrating the
	 * pages to a different memory domain within the GTT). put_pages()
	 * will therefore most likely be called when the object itself is
	 * being released or under memory pressure (where we attempt to
	 * reap pages for the shrinker).
	 */
	int (*get_pages)(struct drm_i915_gem_object *);
	void (*put_pages)(struct drm_i915_gem_object *);
};

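/*
 * Illustrative sketch (assumption): callers pair the two hooks around any use
 * of the backing pages, typically via wrappers that also manage the object's
 * pages_pin_count. A minimal sequence:
 *
 *	ret = obj->ops->get_pages(obj);	-- populates obj->pages (an sg_table)
 *	if (ret == 0) {
 *		... use obj->pages ...
 *		obj->ops->put_pages(obj);
 *	}
 */
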
673a394b 1576struct drm_i915_gem_object {
c397b908 1577 struct drm_gem_object base;
673a394b 1578
37e680a1
CW
1579 const struct drm_i915_gem_object_ops *ops;
1580
2f633156
BW
1581 /** List of VMAs backed by this object */
1582 struct list_head vma_list;
1583
c1ad11fc
CW
1584 /** Stolen memory for this object, instead of being backed by shmem. */
1585 struct drm_mm_node *stolen;
35c20a60 1586 struct list_head global_list;
673a394b 1587
69dc4987 1588 struct list_head ring_list;
b25cb2f8
BW
1589 /** Used in execbuf to temporarily hold a ref */
1590 struct list_head obj_exec_link;
673a394b
EA
1591
1592 /**
65ce3027
CW
1593 * This is set if the object is on the active lists (has pending
1594 * rendering and so a non-zero seqno), and is not set if it i s on
	 * inactive (ready to be unbound) list.
	 */
	unsigned int active:1;

	/**
	 * This is set if the object has been written to since last bound
	 * to the GTT
	 */
	unsigned int dirty:1;

	/**
	 * Fence register bits (if any) for this object. Will be set
	 * as needed when mapped into the GTT.
	 * Protected by dev->struct_mutex.
	 */
	signed int fence_reg:I915_MAX_NUM_FENCE_BITS;

	/**
	 * Advice: are the backing pages purgeable?
	 */
	unsigned int madv:2;

	/**
	 * Current tiling mode for the object.
	 */
	unsigned int tiling_mode:2;
	/**
	 * Whether the tiling parameters for the currently associated fence
	 * register have changed. Note that for the purposes of tracking
	 * tiling changes we also treat the unfenced register, the register
	 * slot that the object occupies whilst it executes a fenced
	 * command (such as BLT on gen2/3), as a "fence".
	 */
	unsigned int fence_dirty:1;

	/** How many users have pinned this object in GTT space. The following
	 * users can each hold at most one reference: pwrite/pread, pin_ioctl
	 * (via user_pin_count), execbuffer (objects are not allowed multiple
	 * times for the same batchbuffer), and the framebuffer code. When
	 * switching/pageflipping, the framebuffer code has at most two buffers
	 * pinned per crtc.
	 *
	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
	 * bits with absolutely no headroom. So use 4 bits. */
	unsigned int pin_count:4;
#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf

	/**
	 * Is the object at the current location in the gtt mappable and
	 * fenceable? Used to avoid costly recalculations.
	 */
	unsigned int map_and_fenceable:1;

	/**
	 * Whether the current gtt mapping needs to be mappable (and isn't just
	 * mappable by accident). Track pin and fault separately for a more
	 * accurate mappable working set.
	 */
	unsigned int fault_mappable:1;
	unsigned int pin_mappable:1;
	unsigned int pin_display:1;

	/*
	 * Is the GPU currently using a fence to access this buffer?
	 */
	unsigned int pending_fenced_gpu_access:1;
	unsigned int fenced_gpu_access:1;

	unsigned int cache_level:3;

	unsigned int has_aliasing_ppgtt_mapping:1;
	unsigned int has_global_gtt_mapping:1;
	unsigned int has_dma_mapping:1;

	struct sg_table *pages;
	int pages_pin_count;

	/* prime dma-buf support */
	void *dma_buf_vmapping;
	int vmapping_count;

	struct intel_ring_buffer *ring;

	/** Breadcrumb of last rendering to the buffer. */
	uint32_t last_read_seqno;
	uint32_t last_write_seqno;
	/** Breadcrumb of last fenced GPU access to the buffer. */
	uint32_t last_fenced_seqno;

	/** Current tiling stride for the object, if it's tiled. */
	uint32_t stride;

	/** References from framebuffers, locks out tiling changes. */
	unsigned long framebuffer_references;

	/** Record of address bit 17 of each page at last unbind. */
	unsigned long *bit_17;

	/** User space pin count and filp owning the pin */
	unsigned long user_pin_count;
	struct drm_file *pin_filp;

	/** for phy allocated objects */
	struct drm_i915_gem_phys_object *phys_obj;
};
#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)

#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)

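/*
 * Illustrative sketch only (not part of the driver): GEM entry points
 * receive a base struct drm_gem_object and use to_intel_bo() to reach the
 * i915-specific state via container_of(). 'example_bo_is_tiled' is a
 * hypothetical helper added here purely to show that conversion.
 */
static inline bool example_bo_is_tiled(struct drm_gem_object *gem_obj)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);

	return obj->tiling_mode != I915_TILING_NONE;
}
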
/**
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable
 * sequence-number comparisons on buffer last_rendering_seqnos, and associate
 * an emission time with seqnos for tracking how far ahead of the GPU we are.
 */
struct drm_i915_gem_request {
	/** On which ring this request was generated */
	struct intel_ring_buffer *ring;

	/** GEM sequence number associated with this request. */
	uint32_t seqno;

	/** Position in the ringbuffer of the start of the request */
	u32 head;

	/** Position in the ringbuffer of the end of the request */
	u32 tail;

	/** Context related to this request */
	struct i915_hw_context *ctx;

	/** Batch buffer related to this request, if any */
	struct drm_i915_gem_object *batch_obj;

	/** Time at which this request was emitted, in jiffies. */
	unsigned long emitted_jiffies;

	/** global list entry for this request */
	struct list_head list;

	struct drm_i915_file_private *file_priv;
	/** file_priv list entry for this request */
	struct list_head client_list;
};

struct drm_i915_file_private {
	struct drm_i915_private *dev_priv;

	struct {
		spinlock_t lock;
		struct list_head request_list;
		struct delayed_work idle_work;
	} mm;
	struct idr context_idr;

	struct i915_ctx_hang_stats hang_stats;
	atomic_t rps_wait_boost;
};

#define INTEL_INFO(dev)	(to_i915(dev)->info)

#define IS_I830(dev)		((dev)->pdev->device == 0x3577)
#define IS_845G(dev)		((dev)->pdev->device == 0x2562)
#define IS_I85X(dev)		(INTEL_INFO(dev)->is_i85x)
#define IS_I865G(dev)		((dev)->pdev->device == 0x2572)
#define IS_I915G(dev)		(INTEL_INFO(dev)->is_i915g)
#define IS_I915GM(dev)		((dev)->pdev->device == 0x2592)
#define IS_I945G(dev)		((dev)->pdev->device == 0x2772)
#define IS_I945GM(dev)		(INTEL_INFO(dev)->is_i945gm)
#define IS_BROADWATER(dev)	(INTEL_INFO(dev)->is_broadwater)
#define IS_CRESTLINE(dev)	(INTEL_INFO(dev)->is_crestline)
#define IS_GM45(dev)		((dev)->pdev->device == 0x2A42)
#define IS_G4X(dev)		(INTEL_INFO(dev)->is_g4x)
#define IS_PINEVIEW_G(dev)	((dev)->pdev->device == 0xa001)
#define IS_PINEVIEW_M(dev)	((dev)->pdev->device == 0xa011)
#define IS_PINEVIEW(dev)	(INTEL_INFO(dev)->is_pineview)
#define IS_G33(dev)		(INTEL_INFO(dev)->is_g33)
#define IS_IRONLAKE_M(dev)	((dev)->pdev->device == 0x0046)
#define IS_IVYBRIDGE(dev)	(INTEL_INFO(dev)->is_ivybridge)
#define IS_IVB_GT1(dev)		((dev)->pdev->device == 0x0156 || \
				 (dev)->pdev->device == 0x0152 || \
				 (dev)->pdev->device == 0x015a)
#define IS_SNB_GT1(dev)		((dev)->pdev->device == 0x0102 || \
				 (dev)->pdev->device == 0x0106 || \
				 (dev)->pdev->device == 0x010A)
#define IS_VALLEYVIEW(dev)	(INTEL_INFO(dev)->is_valleyview)
#define IS_HASWELL(dev)		(INTEL_INFO(dev)->is_haswell)
#define IS_BROADWELL(dev)	(INTEL_INFO(dev)->gen == 8)
#define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
#define IS_HSW_EARLY_SDV(dev)	(IS_HASWELL(dev) && \
				 ((dev)->pdev->device & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev)		(IS_BROADWELL(dev) && \
				 (((dev)->pdev->device & 0xf) == 0x2 || \
				  ((dev)->pdev->device & 0xf) == 0x6 || \
				  ((dev)->pdev->device & 0xf) == 0xe))
#define IS_HSW_ULT(dev)		(IS_HASWELL(dev) && \
				 ((dev)->pdev->device & 0xFF00) == 0x0A00)
#define IS_ULT(dev)		(IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
#define IS_HSW_GT3(dev)		(IS_HASWELL(dev) && \
				 ((dev)->pdev->device & 0x00F0) == 0x0020)
#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)

/*
 * The genX designation typically refers to the render engine, so render
 * capability related checks should use IS_GEN, while display and other checks
 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
 * chips, etc.).
 */
#define IS_GEN2(dev)	(INTEL_INFO(dev)->gen == 2)
#define IS_GEN3(dev)	(INTEL_INFO(dev)->gen == 3)
#define IS_GEN4(dev)	(INTEL_INFO(dev)->gen == 4)
#define IS_GEN5(dev)	(INTEL_INFO(dev)->gen == 5)
#define IS_GEN6(dev)	(INTEL_INFO(dev)->gen == 6)
#define IS_GEN7(dev)	(INTEL_INFO(dev)->gen == 7)
#define IS_GEN8(dev)	(INTEL_INFO(dev)->gen == 8)

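/*
 * Illustrative sketch only (hypothetical helper, not part of the driver):
 * per the comment above, render-side checks key off IS_GENx() while
 * platform-specific quirks use the IS_foo macros, so a gen7 render
 * workaround that must skip Valleyview would be gated like this.
 */
static inline bool example_needs_ivb_hsw_workaround(struct drm_device *dev)
{
	return IS_GEN7(dev) && !IS_VALLEYVIEW(dev);
}
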
#define RENDER_RING		(1<<RCS)
#define BSD_RING		(1<<VCS)
#define BLT_RING		(1<<BCS)
#define VEBOX_RING		(1<<VECS)
#define HAS_BSD(dev)		(INTEL_INFO(dev)->ring_mask & BSD_RING)
#define HAS_BLT(dev)		(INTEL_INFO(dev)->ring_mask & BLT_RING)
#define HAS_VEBOX(dev)		(INTEL_INFO(dev)->ring_mask & VEBOX_RING)
#define HAS_LLC(dev)		(INTEL_INFO(dev)->has_llc)
#define HAS_WT(dev)		(IS_HASWELL(dev) && to_i915(dev)->ellc_size)
#define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)

#define HAS_HW_CONTEXTS(dev)	(INTEL_INFO(dev)->gen >= 6)
#define HAS_ALIASING_PPGTT(dev)	(INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev))

#define HAS_OVERLAY(dev)		(INTEL_INFO(dev)->has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev)	(INTEL_INFO(dev)->overlay_needs_physical)

/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev)		(IS_I830(dev) || IS_845G(dev))

/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
 * rows, which changed the alignment requirements and fence programming.
 */
#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
						      IS_I915GM(dev)))
#define SUPPORTS_DIGITAL_OUTPUTS(dev)	(!IS_GEN2(dev) && !IS_PINEVIEW(dev))
#define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_TV(dev)		(INTEL_INFO(dev)->supports_tv)
#define I915_HAS_HOTPLUG(dev)		(INTEL_INFO(dev)->has_hotplug)

#define HAS_FW_BLC(dev)		(INTEL_INFO(dev)->gen > 2)
#define HAS_PIPE_CXSR(dev)	(INTEL_INFO(dev)->has_pipe_cxsr)
#define HAS_FBC(dev)		(INTEL_INFO(dev)->has_fbc)

#define HAS_IPS(dev)		(IS_ULT(dev) || IS_BROADWELL(dev))

#define HAS_DDI(dev)		(INTEL_INFO(dev)->has_ddi)
#define HAS_FPGA_DBG_UNCLAIMED(dev)	(INTEL_INFO(dev)->has_fpga_dbg)
#define HAS_PSR(dev)		(IS_HASWELL(dev) || IS_BROADWELL(dev))
#define HAS_PC8(dev)		(IS_HASWELL(dev)) /* XXX HSW:ULX */
#define HAS_RUNTIME_PM(dev)	(IS_HASWELL(dev))

#define INTEL_PCH_DEVICE_ID_MASK	0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE	0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE	0x1c00
#define INTEL_PCH_PPT_DEVICE_ID_TYPE	0x1e00
#define INTEL_PCH_LPT_DEVICE_ID_TYPE	0x8c00
#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE	0x9c00

#define INTEL_PCH_TYPE(dev)	(to_i915(dev)->pch_type)
#define HAS_PCH_LPT(dev)	(INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_CPT(dev)	(INTEL_PCH_TYPE(dev) == PCH_CPT)
#define HAS_PCH_IBX(dev)	(INTEL_PCH_TYPE(dev) == PCH_IBX)
#define HAS_PCH_NOP(dev)	(INTEL_PCH_TYPE(dev) == PCH_NOP)
#define HAS_PCH_SPLIT(dev)	(INTEL_PCH_TYPE(dev) != PCH_NONE)

/* DPF == dynamic parity feature */
#define HAS_L3_DPF(dev)		(IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
#define NUM_L3_SLICES(dev)	(IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))

#define GT_FREQUENCY_MULTIPLIER 50

#include "i915_trace.h"

extern const struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
extern unsigned int i915_fbpercrtc __always_unused;
extern int i915_panel_ignore_lid __read_mostly;
extern unsigned int i915_powersave __read_mostly;
extern int i915_semaphores __read_mostly;
extern unsigned int i915_lvds_downclock __read_mostly;
extern int i915_lvds_channel_mode __read_mostly;
extern int i915_panel_use_ssc __read_mostly;
extern int i915_vbt_sdvo_panel_type __read_mostly;
extern int i915_enable_rc6 __read_mostly;
extern int i915_enable_fbc __read_mostly;
extern bool i915_enable_hangcheck __read_mostly;
extern int i915_enable_ppgtt __read_mostly;
extern int i915_enable_psr __read_mostly;
extern unsigned int i915_preliminary_hw_support __read_mostly;
extern int i915_disable_power_well __read_mostly;
extern int i915_enable_ips __read_mostly;
extern bool i915_fastboot __read_mostly;
extern int i915_enable_pc8 __read_mostly;
extern int i915_pc8_timeout __read_mostly;
extern bool i915_prefault_disable __read_mostly;

extern int i915_suspend(struct drm_device *dev, pm_message_t state);
extern int i915_resume(struct drm_device *dev);
extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);

/* i915_dma.c */
void i915_update_dri1_breadcrumb(struct drm_device *dev);
extern void i915_kernel_lost_context(struct drm_device *dev);
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *);
extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
extern void i915_driver_lastclose(struct drm_device *dev);
extern void i915_driver_preclose(struct drm_device *dev,
				 struct drm_file *file_priv);
extern void i915_driver_postclose(struct drm_device *dev,
				  struct drm_file *file_priv);
extern int i915_driver_device_is_agp(struct drm_device *dev);
#ifdef CONFIG_COMPAT
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg);
#endif
extern int i915_emit_box(struct drm_device *dev,
			 struct drm_clip_rect *box,
			 int DR1, int DR4);
extern int intel_gpu_reset(struct drm_device *dev);
extern int i915_reset(struct drm_device *dev);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);

extern void intel_console_resume(struct work_struct *work);

/* i915_irq.c */
void i915_queue_hangcheck(struct drm_device *dev);
void i915_handle_error(struct drm_device *dev, bool wedged);

extern void intel_irq_init(struct drm_device *dev);
extern void intel_hpd_init(struct drm_device *dev);

extern void intel_uncore_sanitize(struct drm_device *dev);
extern void intel_uncore_early_sanitize(struct drm_device *dev);
extern void intel_uncore_init(struct drm_device *dev);
extern void intel_uncore_check_errors(struct drm_device *dev);
extern void intel_uncore_fini(struct drm_device *dev);

void
i915_enable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask);

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, enum pipe pipe, u32 mask);

/* i915_gem.c */
int i915_gem_init_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_create_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);
int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);
int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int i915_gem_execbuffer(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_execbuffer2(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv);
int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file);
int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file);
int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int i915_gem_set_tiling(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_get_tiling(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
void *i915_gem_object_alloc(struct drm_device *dev);
void i915_gem_object_free(struct drm_i915_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
						  size_t size);
void i915_gem_free_object(struct drm_gem_object *obj);
void i915_gem_vma_destroy(struct i915_vma *vma);

int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
				     struct i915_address_space *vm,
				     uint32_t alignment,
				     bool map_and_fenceable,
				     bool nonblocking);
void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
int __must_check i915_vma_unbind(struct i915_vma *vma);
int __must_check i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj);
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);

int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
{
	struct sg_page_iter sg_iter;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, n)
		return sg_page_iter_page(&sg_iter);

	return NULL;
}
static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	BUG_ON(obj->pages == NULL);
	obj->pages_pin_count++;
}
static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	BUG_ON(obj->pages_pin_count == 0);
	obj->pages_pin_count--;
}

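/*
 * Usage sketch (illustrative only; 'example_peek_first_page' is a
 * hypothetical helper, not part of the driver): CPU access to the backing
 * storage is bracketed by a pages pin so the shrinker cannot reap
 * obj->pages mid-access. The caller must already have populated the pages
 * via i915_gem_object_get_pages() under dev->struct_mutex, or the pin will
 * hit the BUG_ON above.
 */
static inline struct page *
example_peek_first_page(struct drm_i915_gem_object *obj)
{
	struct page *page;

	i915_gem_object_pin_pages(obj);
	page = i915_gem_object_get_page(obj, 0);
	i915_gem_object_unpin_pages(obj);

	return page;
}
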
int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
			 struct intel_ring_buffer *to);
void i915_vma_move_to_active(struct i915_vma *vma,
			     struct intel_ring_buffer *ring);
int i915_gem_dumb_create(struct drm_file *file_priv,
			 struct drm_device *dev,
			 struct drm_mode_create_dumb *args);
int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
		      uint32_t handle, uint64_t *offset);
/**
 * Returns true if seq1 is later than seq2.
 */
static inline bool
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

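/*
 * Illustrative sketch only ('example_request_completed' is a hypothetical
 * helper, not part of the driver): the signed subtraction above makes the
 * comparison robust against 32-bit seqno wraparound, e.g.
 * i915_seqno_passed(2, 0xfffffffe) is true. A typical use is deciding
 * whether a request's seqno has been reached by the GPU.
 */
static inline bool
example_request_completed(struct drm_i915_gem_request *request,
			  u32 last_completed_seqno)
{
	return i915_seqno_passed(last_completed_seqno, request->seqno);
}
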
int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);

static inline bool
i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
{
	if (obj->fence_reg != I915_FENCE_REG_NONE) {
		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
		dev_priv->fence_regs[obj->fence_reg].pin_count++;
		return true;
	} else
		return false;
}

static inline void
i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
{
	if (obj->fence_reg != I915_FENCE_REG_NONE) {
		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
		WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
		dev_priv->fence_regs[obj->fence_reg].pin_count--;
	}
}

bool i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
				      bool interruptible);
static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
{
	return unlikely(atomic_read(&error->reset_counter)
			& (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
}

static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
{
	return atomic_read(&error->reset_counter) & I915_WEDGED;
}

static inline u32 i915_reset_count(struct i915_gpu_error *error)
{
	/* The reset counter is bumped once when a reset begins and again when
	 * it completes, so (masking off the wedged bit) half of it, rounded
	 * up while a reset is still pending, is the number of GPU resets.
	 */
	return ((atomic_read(&error->reset_counter) & ~I915_WEDGED) + 1) / 2;
}

void i915_gem_reset(struct drm_device *dev);
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
int __must_check i915_gem_init(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
int i915_gem_l3_remap(struct intel_ring_buffer *ring, int slice);
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_suspend(struct drm_device *dev);
int __i915_add_request(struct intel_ring_buffer *ring,
		       struct drm_file *file,
		       struct drm_i915_gem_object *batch_obj,
		       u32 *seqno);
#define i915_add_request(ring, seqno) \
	__i915_add_request(ring, NULL, NULL, seqno)
int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
				 uint32_t seqno);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
				  bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     struct intel_ring_buffer *pipelined);
void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
int i915_gem_attach_phys_object(struct drm_device *dev,
				struct drm_i915_gem_object *obj,
				int id,
				int align);
void i915_gem_detach_phys_object(struct drm_device *dev,
				 struct drm_i915_gem_object *obj);
void i915_gem_free_all_phys_object(struct drm_device *dev);
int i915_gem_open(struct drm_device *dev, struct drm_file *file);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);

uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode);
uint32_t
i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
			   int tiling_mode, bool fenced);

int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
				    enum i915_cache_level cache_level);

struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf);

struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
				      struct drm_gem_object *gem_obj, int flags);

void i915_gem_restore_fences(struct drm_device *dev);

unsigned long i915_gem_obj_offset(struct drm_i915_gem_object *o,
				  struct i915_address_space *vm);
bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o);
bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
			struct i915_address_space *vm);
unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
				struct i915_address_space *vm);
struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
				     struct i915_address_space *vm);
struct i915_vma *
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
				  struct i915_address_space *vm);

struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj);

/* Some GGTT VM helpers */
#define obj_to_ggtt(obj) \
	(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
static inline bool i915_is_ggtt(struct i915_address_space *vm)
{
	struct i915_address_space *ggtt =
		&((struct drm_i915_private *)(vm)->dev->dev_private)->gtt.base;
	return vm == ggtt;
}

static inline bool i915_gem_obj_ggtt_bound(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_bound(obj, obj_to_ggtt(obj));
}

static inline unsigned long
i915_gem_obj_ggtt_offset(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_offset(obj, obj_to_ggtt(obj));
}

static inline unsigned long
i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
{
	return i915_gem_obj_size(obj, obj_to_ggtt(obj));
}

static inline int __must_check
i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
		      uint32_t alignment,
		      bool map_and_fenceable,
		      bool nonblocking)
{
	return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment,
				   map_and_fenceable, nonblocking);
}

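/*
 * Usage sketch (illustrative only; 'example_pin_to_ggtt' and the 4096-byte
 * alignment are hypothetical, not part of the driver): binding an object
 * into the global GTT and reading back the offset the mapping landed at.
 * The object stays pinned on success; the caller must balance this with
 * i915_gem_object_unpin().
 */
static inline int __must_check
example_pin_to_ggtt(struct drm_i915_gem_object *obj, unsigned long *offset)
{
	int ret;

	ret = i915_gem_obj_ggtt_pin(obj, 4096, false, false);
	if (ret)
		return ret;

	*offset = i915_gem_obj_ggtt_offset(obj);
	return 0;
}
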
/* i915_gem_context.c */
int __must_check i915_gem_context_init(struct drm_device *dev);
void i915_gem_context_fini(struct drm_device *dev);
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
int i915_switch_context(struct intel_ring_buffer *ring,
			struct drm_file *file, int to_id);
void i915_gem_context_free(struct kref *ctx_ref);
static inline void i915_gem_context_reference(struct i915_hw_context *ctx)
{
	kref_get(&ctx->ref);
}

static inline void i915_gem_context_unreference(struct i915_hw_context *ctx)
{
	kref_put(&ctx->ref, i915_gem_context_free);
}

struct i915_ctx_hang_stats * __must_check
i915_gem_context_get_hang_stats(struct drm_device *dev,
				struct drm_file *file,
				u32 id);
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file);

/* i915_gem_gtt.c */
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
			    struct drm_i915_gem_object *obj,
			    enum i915_cache_level cache_level);
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
			      struct drm_i915_gem_object *obj);

void i915_check_and_clear_faults(struct drm_device *dev);
void i915_gem_suspend_gtt_mappings(struct drm_device *dev);
void i915_gem_restore_gtt_mappings(struct drm_device *dev);
int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
			      enum i915_cache_level cache_level);
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
void i915_gem_init_global_gtt(struct drm_device *dev);
void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
			       unsigned long mappable_end, unsigned long end);
int i915_gem_gtt_init(struct drm_device *dev);
static inline void i915_gem_chipset_flush(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen < 6)
		intel_gtt_chipset_flush();
}

/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev,
					  struct i915_address_space *vm,
					  int min_size,
					  unsigned alignment,
					  unsigned cache_level,
					  bool mappable,
					  bool nonblock);
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
int i915_gem_evict_everything(struct drm_device *dev);

/* i915_gem_stolen.c */
int i915_gem_init_stolen(struct drm_device *dev);
int i915_gem_stolen_setup_compression(struct drm_device *dev, int size);
void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
void i915_gem_cleanup_stolen(struct drm_device *dev);
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
					       u32 stolen_offset,
					       u32 gtt_offset,
					       u32 size);
void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);

/* i915_gem_tiling.c */
static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;

	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
		obj->tiling_mode != I915_TILING_NONE;
}

void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);

/* i915_gem_debug.c */
#if WATCH_LISTS
int i915_verify_lists(struct drm_device *dev);
#else
#define i915_verify_lists(dev) 0
#endif

/* i915_debugfs.c */
int i915_debugfs_init(struct drm_minor *minor);
void i915_debugfs_cleanup(struct drm_minor *minor);
#ifdef CONFIG_DEBUG_FS
void intel_display_crc_init(struct drm_device *dev);
#else
static inline void intel_display_crc_init(struct drm_device *dev) {}
#endif

/* i915_gpu_error.c */
__printf(2, 3)
void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...);
int i915_error_state_to_str(struct drm_i915_error_state_buf *estr,
			    const struct i915_error_state_file_priv *error);
int i915_error_state_buf_init(struct drm_i915_error_state_buf *eb,
			      size_t count, loff_t pos);
static inline void i915_error_state_buf_release(
	struct drm_i915_error_state_buf *eb)
{
	kfree(eb->buf);
}
void i915_capture_error_state(struct drm_device *dev);
void i915_error_state_get(struct drm_device *dev,
			  struct i915_error_state_file_priv *error_priv);
void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
void i915_destroy_error_state(struct drm_device *dev);

void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
const char *i915_cache_level_str(int type);

/* i915_suspend.c */
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);

/* i915_ums.c */
void i915_save_display_reg(struct drm_device *dev);
void i915_restore_display_reg(struct drm_device *dev);

/* i915_sysfs.c */
void i915_setup_sysfs(struct drm_device *dev_priv);
void i915_teardown_sysfs(struct drm_device *dev_priv);

/* intel_i2c.c */
extern int intel_setup_gmbus(struct drm_device *dev);
extern void intel_teardown_gmbus(struct drm_device *dev);
static inline bool intel_gmbus_is_port_valid(unsigned port)
{
	return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
}

extern struct i2c_adapter *intel_gmbus_get_adapter(
		struct drm_i915_private *dev_priv, unsigned port);
extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
{
	return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
}
extern void intel_i2c_reset(struct drm_device *dev);

/* intel_opregion.c */
struct intel_encoder;
#ifdef CONFIG_ACPI
extern int intel_opregion_setup(struct drm_device *dev);
extern void intel_opregion_init(struct drm_device *dev);
extern void intel_opregion_fini(struct drm_device *dev);
extern void intel_opregion_asle_intr(struct drm_device *dev);
extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
					 bool enable);
extern int intel_opregion_notify_adapter(struct drm_device *dev,
					 pci_power_t state);
#else
static inline int intel_opregion_setup(struct drm_device *dev) { return 0; }
static inline void intel_opregion_init(struct drm_device *dev) { return; }
static inline void intel_opregion_fini(struct drm_device *dev) { return; }
static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
static inline int
intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
{
	return 0;
}
static inline int
intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
{
	return 0;
}
#endif

/* intel_acpi.c */
#ifdef CONFIG_ACPI
extern void intel_register_dsm_handler(void);
extern void intel_unregister_dsm_handler(void);
#else
static inline void intel_register_dsm_handler(void) { return; }
static inline void intel_unregister_dsm_handler(void) { return; }
#endif /* CONFIG_ACPI */

/* modesetting */
extern void intel_modeset_init_hw(struct drm_device *dev);
extern void intel_modeset_suspend_hw(struct drm_device *dev);
extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void intel_modeset_setup_hw_state(struct drm_device *dev,
					 bool force_restore);
extern void i915_redisable_vga(struct drm_device *dev);
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void valleyview_set_rps(struct drm_device *dev, u8 val);
extern int valleyview_rps_max_freq(struct drm_i915_private *dev_priv);
extern int valleyview_rps_min_freq(struct drm_i915_private *dev_priv);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
extern int intel_enable_rc6(const struct drm_device *dev);

extern bool i915_semaphore_is_enabled(struct drm_device *dev);
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file);
int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file);

/* overlay */
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
extern void intel_overlay_print_error_state(struct drm_i915_error_state_buf *e,
					    struct intel_overlay_error_state *error);

extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
					    struct drm_device *dev,
					    struct intel_display_error_state *error);

/* On the SNB platform, before reading ring registers the forcewake bit
 * must be set to prevent the GT core from powering down and returning
 * stale values.
 */
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);

int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);

/* intel_sideband.c */
u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr);
void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val);
u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val);
u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
		   enum intel_sbi_destination destination);
void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
		     enum intel_sbi_destination destination);
u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);

int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val);
int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val);

void vlv_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
void vlv_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);

#define FORCEWAKE_VLV_RENDER_RANGE_OFFSET(reg) \
	(((reg) >= 0x2000 && (reg) < 0x4000) || \
	 ((reg) >= 0x5000 && (reg) < 0x8000) || \
	 ((reg) >= 0xB000 && (reg) < 0x12000) || \
	 ((reg) >= 0x2E000 && (reg) < 0x30000))

#define FORCEWAKE_VLV_MEDIA_RANGE_OFFSET(reg) \
	(((reg) >= 0x12000 && (reg) < 0x14000) || \
	 ((reg) >= 0x22000 && (reg) < 0x24000) || \
	 ((reg) >= 0x30000 && (reg) < 0x40000))

#define FORCEWAKE_RENDER	(1 << 0)
#define FORCEWAKE_MEDIA		(1 << 1)
#define FORCEWAKE_ALL		(FORCEWAKE_RENDER | FORCEWAKE_MEDIA)

#define I915_READ8(reg)		dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
#define I915_WRITE8(reg, val)	dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)

#define I915_READ16(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true)
#define I915_WRITE16(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true)
#define I915_READ16_NOTRACE(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false)
#define I915_WRITE16_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false)

#define I915_READ(reg)		dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true)
#define I915_WRITE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true)
#define I915_READ_NOTRACE(reg)	dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
#define I915_WRITE_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)

#define I915_WRITE64(reg, val)	dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true)
#define I915_READ64(reg)	dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)

#define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)

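/*
 * Usage sketch (illustrative only; 'example_read_gt_reg' is a hypothetical
 * helper, not part of the driver): as the SNB comment above notes, a raw
 * GT register read is bracketed with a forcewake get/put so the GT core
 * cannot power down and return stale values mid-read.
 */
static inline u32 example_read_gt_reg(struct drm_i915_private *dev_priv,
				      u32 reg)
{
	u32 val;

	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
	val = I915_READ(reg);
	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);

	return val;
}
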
/* "Broadcast RGB" property */
#define INTEL_BROADCAST_RGB_AUTO 0
#define INTEL_BROADCAST_RGB_FULL 1
#define INTEL_BROADCAST_RGB_LIMITED 2

static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
{
	if (HAS_PCH_SPLIT(dev))
		return CPU_VGACNTRL;
	else if (IS_VALLEYVIEW(dev))
		return VLV_VGACNTRL;
	else
		return VGACNTRL;
}

static inline void __user *to_user_ptr(u64 address)
{
	return (void __user *)(uintptr_t)address;
}

static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
{
	unsigned long j = msecs_to_jiffies(m);

	/* Add one jiffy so that, despite jiffy granularity, at least the
	 * full requested time elapses before the timeout can fire.
	 */
	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}

static inline unsigned long
timespec_to_jiffies_timeout(const struct timespec *value)
{
	unsigned long j = timespec_to_jiffies(value);

	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
}

#endif