/* i915_drv.h -- Private header for the I915 driver -*- linux-c -*-
 */
/*
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _I915_DRV_H_
#define _I915_DRV_H_

#include <uapi/drm/i915_drm.h>

#include "i915_reg.h"
#include "intel_bios.h"
#include "intel_ringbuffer.h"
#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <drm/intel-gtt.h>
#include <linux/backlight.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
#include <linux/pm_qos.h>

/* General customization:
 */

#define DRIVER_AUTHOR		"Tungsten Graphics, Inc."

#define DRIVER_NAME		"i915"
#define DRIVER_DESC		"Intel Graphics"
#define DRIVER_DATE		"20080730"

enum pipe {
	PIPE_A = 0,
	PIPE_B,
	PIPE_C,
	I915_MAX_PIPES
};
#define pipe_name(p) ((p) + 'A')

enum transcoder {
	TRANSCODER_A = 0,
	TRANSCODER_B,
	TRANSCODER_C,
	TRANSCODER_EDP = 0xF,
};
#define transcoder_name(t) ((t) + 'A')

enum plane {
	PLANE_A = 0,
	PLANE_B,
	PLANE_C,
};
#define plane_name(p) ((p) + 'A')

#define sprite_name(p, s) ((p) * dev_priv->num_plane + (s) + 'A')

enum port {
	PORT_A = 0,
	PORT_B,
	PORT_C,
	PORT_D,
	PORT_E,
	I915_MAX_PORTS
};
#define port_name(p) ((p) + 'A')

enum intel_display_power_domain {
	POWER_DOMAIN_PIPE_A,
	POWER_DOMAIN_PIPE_B,
	POWER_DOMAIN_PIPE_C,
	POWER_DOMAIN_PIPE_A_PANEL_FITTER,
	POWER_DOMAIN_PIPE_B_PANEL_FITTER,
	POWER_DOMAIN_PIPE_C_PANEL_FITTER,
	POWER_DOMAIN_TRANSCODER_A,
	POWER_DOMAIN_TRANSCODER_B,
	POWER_DOMAIN_TRANSCODER_C,
	POWER_DOMAIN_TRANSCODER_EDP = POWER_DOMAIN_TRANSCODER_A + 0xF,
};

#define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)
#define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \
		((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER)
#define POWER_DOMAIN_TRANSCODER(tran) ((tran) + POWER_DOMAIN_TRANSCODER_A)
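
/*
 * Example (illustrative): the power domain enums are laid out so that a
 * pipe/transcoder index can simply be added to the first entry of its
 * group, e.g.
 *
 *	POWER_DOMAIN_PIPE(PIPE_B)		== POWER_DOMAIN_PIPE_B
 *	POWER_DOMAIN_PIPE_PANEL_FITTER(PIPE_B)	== POWER_DOMAIN_PIPE_B_PANEL_FITTER
 *	POWER_DOMAIN_TRANSCODER(TRANSCODER_EDP)	== POWER_DOMAIN_TRANSCODER_EDP
 *
 * The last one works because POWER_DOMAIN_TRANSCODER_EDP mirrors the
 * + 0xF offset of TRANSCODER_EDP above.
 */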
108
109 enum hpd_pin {
110 HPD_NONE = 0,
111 HPD_PORT_A = HPD_NONE, /* PORT_A is internal */
112 HPD_TV = HPD_NONE, /* TV is known to be unreliable */
113 HPD_CRT,
114 HPD_SDVO_B,
115 HPD_SDVO_C,
116 HPD_PORT_B,
117 HPD_PORT_C,
118 HPD_PORT_D,
119 HPD_NUM_PINS
120 };
121
122 #define I915_GEM_GPU_DOMAINS \
123 (I915_GEM_DOMAIN_RENDER | \
124 I915_GEM_DOMAIN_SAMPLER | \
125 I915_GEM_DOMAIN_COMMAND | \
126 I915_GEM_DOMAIN_INSTRUCTION | \
127 I915_GEM_DOMAIN_VERTEX)
128
129 #define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++)
130
131 #define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \
132 list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \
133 if ((intel_encoder)->base.crtc == (__crtc))
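
/*
 * Usage sketch for the iterator above (illustrative, not from this file):
 *
 *	struct intel_encoder *encoder;
 *
 *	for_each_encoder_on_crtc(dev, crtc, encoder)
 *		do_something(encoder);		// hypothetical helper
 *
 * The trailing if-clause makes the whole macro expand to a single
 * statement, so callers can treat it exactly like a for loop.
 */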

struct intel_pch_pll {
	int refcount; /* count of number of CRTCs sharing this PLL */
	int active; /* count of number of active CRTCs (i.e. DPMS on) */
	bool on; /* is the PLL actually active? Disabled during modeset */
	int pll_reg;
	int fp0_reg;
	int fp1_reg;
};
#define I915_NUM_PLLS 2

/* Used by dp and fdi links */
struct intel_link_m_n {
	uint32_t tu;
	uint32_t gmch_m;
	uint32_t gmch_n;
	uint32_t link_m;
	uint32_t link_n;
};

void intel_link_compute_m_n(int bpp, int nlanes,
			    int pixel_clock, int link_clock,
			    struct intel_link_m_n *m_n);
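
/*
 * Rough idea of what the helper computes (informative sketch only; the
 * real math lives in intel_display.c): the M/N pairs are the ratios the
 * hardware uses to pace the link, along the lines of
 *
 *	gmch M/N ~ (bpp * pixel_clock) / (link_clock * nlanes * 8)
 *	link M/N ~ pixel_clock / link_clock
 *
 * with tu being the transfer unit size.
 */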

struct intel_ddi_plls {
	int spll_refcount;
	int wrpll1_refcount;
	int wrpll2_refcount;
};

/* Interface history:
 *
 * 1.1: Original.
 * 1.2: Add Power Management
 * 1.3: Add vblank support
 * 1.4: Fix cmdbuffer path, add heap destroy
 * 1.5: Add vblank pipe configuration
 * 1.6: - New ioctl for scheduling buffer swaps on vertical blank
 *      - Support vertical blank on secondary display pipe
 */
#define DRIVER_MAJOR		1
#define DRIVER_MINOR		6
#define DRIVER_PATCHLEVEL	0

#define WATCH_COHERENCY	0
#define WATCH_LISTS	0
#define WATCH_GTT	0

#define I915_GEM_PHYS_CURSOR_0 1
#define I915_GEM_PHYS_CURSOR_1 2
#define I915_GEM_PHYS_OVERLAY_REGS 3
#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS)

struct drm_i915_gem_phys_object {
	int id;
	struct page **page_list;
	drm_dma_handle_t *handle;
	struct drm_i915_gem_object *cur_obj;
};

struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
struct opregion_asle;
struct drm_i915_private;

struct intel_opregion {
	struct opregion_header __iomem *header;
	struct opregion_acpi __iomem *acpi;
	struct opregion_swsci __iomem *swsci;
	struct opregion_asle __iomem *asle;
	void __iomem *vbt;
	u32 __iomem *lid_state;
};
#define OPREGION_SIZE            (8*1024)

struct intel_overlay;
struct intel_overlay_error_state;

struct drm_i915_master_private {
	drm_local_map_t *sarea;
	struct _drm_i915_sarea *sarea_priv;
};
#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 32
/* 32 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 6

struct drm_i915_fence_reg {
	struct list_head lru_list;
	struct drm_i915_gem_object *obj;
	int pin_count;
};

struct sdvo_device_mapping {
	u8 initialized;
	u8 dvo_port;
	u8 slave_addr;
	u8 dvo_wiring;
	u8 i2c_pin;
	u8 ddc_pin;
};

struct intel_display_error_state;

struct drm_i915_error_state {
	struct kref ref;
	u32 eir;
	u32 pgtbl_er;
	u32 ier;
	u32 ccid;
	u32 derrmr;
	u32 forcewake;
	bool waiting[I915_NUM_RINGS];
	u32 pipestat[I915_MAX_PIPES];
	u32 tail[I915_NUM_RINGS];
	u32 head[I915_NUM_RINGS];
	u32 ctl[I915_NUM_RINGS];
	u32 ipeir[I915_NUM_RINGS];
	u32 ipehr[I915_NUM_RINGS];
	u32 instdone[I915_NUM_RINGS];
	u32 acthd[I915_NUM_RINGS];
	u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1];
	u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1];
	u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */
	/* our own tracking of ring head and tail */
	u32 cpu_ring_head[I915_NUM_RINGS];
	u32 cpu_ring_tail[I915_NUM_RINGS];
	u32 error; /* gen6+ */
	u32 err_int; /* gen7 */
	u32 instpm[I915_NUM_RINGS];
	u32 instps[I915_NUM_RINGS];
	u32 extra_instdone[I915_NUM_INSTDONE_REG];
	u32 seqno[I915_NUM_RINGS];
	u64 bbaddr;
	u32 fault_reg[I915_NUM_RINGS];
	u32 done_reg;
	u32 faddr[I915_NUM_RINGS];
	u64 fence[I915_MAX_NUM_FENCES];
	struct timeval time;
	struct drm_i915_error_ring {
		struct drm_i915_error_object {
			int page_count;
			u32 gtt_offset;
			u32 *pages[0];
		} *ringbuffer, *batchbuffer, *ctx;
		struct drm_i915_error_request {
			long jiffies;
			u32 seqno;
			u32 tail;
		} *requests;
		int num_requests;
	} ring[I915_NUM_RINGS];
	struct drm_i915_error_buffer {
		u32 size;
		u32 name;
		u32 rseqno, wseqno;
		u32 gtt_offset;
		u32 read_domains;
		u32 write_domain;
		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
		s32 pinned:2;
		u32 tiling:2;
		u32 dirty:1;
		u32 purgeable:1;
		s32 ring:4;
		u32 cache_level:2;
	} *active_bo, *pinned_bo;
	u32 active_bo_count, pinned_bo_count;
	struct intel_overlay_error_state *overlay;
	struct intel_display_error_state *display;
};

struct intel_crtc_config;
struct intel_crtc;

struct drm_i915_display_funcs {
	bool (*fbc_enabled)(struct drm_device *dev);
	void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval);
	void (*disable_fbc)(struct drm_device *dev);
	int (*get_display_clock_speed)(struct drm_device *dev);
	int (*get_fifo_size)(struct drm_device *dev, int plane);
	void (*update_wm)(struct drm_device *dev);
	void (*update_sprite_wm)(struct drm_device *dev, int pipe,
				 uint32_t sprite_width, int pixel_size);
	void (*modeset_global_resources)(struct drm_device *dev);
	/* Returns the active state of the crtc, and if the crtc is active,
	 * fills out the pipe-config with the hw state. */
	bool (*get_pipe_config)(struct intel_crtc *,
				struct intel_crtc_config *);
	int (*crtc_mode_set)(struct drm_crtc *crtc,
			     int x, int y,
			     struct drm_framebuffer *old_fb);
	void (*crtc_enable)(struct drm_crtc *crtc);
	void (*crtc_disable)(struct drm_crtc *crtc);
	void (*off)(struct drm_crtc *crtc);
	void (*write_eld)(struct drm_connector *connector,
			  struct drm_crtc *crtc);
	void (*fdi_link_train)(struct drm_crtc *crtc);
	void (*init_clock_gating)(struct drm_device *dev);
	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,
			  struct drm_framebuffer *fb,
			  struct drm_i915_gem_object *obj);
	int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			    int x, int y);
	void (*hpd_irq_setup)(struct drm_device *dev);
	/* clock updates for mode set */
	/* cursor updates */
	/* render clock increase/decrease */
	/* display clock increase/decrease */
	/* pll clock increase/decrease */
};

struct drm_i915_gt_funcs {
	void (*force_wake_get)(struct drm_i915_private *dev_priv);
	void (*force_wake_put)(struct drm_i915_private *dev_priv);
};

#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
	func(is_mobile) sep \
	func(is_i85x) sep \
	func(is_i915g) sep \
	func(is_i945gm) sep \
	func(is_g33) sep \
	func(need_gfx_hws) sep \
	func(is_g4x) sep \
	func(is_pineview) sep \
	func(is_broadwater) sep \
	func(is_crestline) sep \
	func(is_ivybridge) sep \
	func(is_valleyview) sep \
	func(is_haswell) sep \
	func(has_force_wake) sep \
	func(has_fbc) sep \
	func(has_pipe_cxsr) sep \
	func(has_hotplug) sep \
	func(cursor_needs_physical) sep \
	func(has_overlay) sep \
	func(overlay_needs_physical) sep \
	func(supports_tv) sep \
	func(has_bsd_ring) sep \
	func(has_blt_ring) sep \
	func(has_llc) sep \
	func(has_ddi) sep \
	func(has_fpga_dbg)

#define DEFINE_FLAG(name) u8 name:1
#define SEP_SEMICOLON ;

struct intel_device_info {
	u32 display_mmio_offset;
	u8 num_pipes:3;
	u8 gen;
	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON);
};

#undef DEFINE_FLAG
#undef SEP_SEMICOLON
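
/*
 * With the definitions above, the DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG,
 * SEP_SEMICOLON) invocation inside intel_device_info expands to one
 * bitfield per flag, i.e. (abridged):
 *
 *	u8 is_mobile:1;
 *	u8 is_i85x:1;
 *	...
 *	u8 has_fpga_dbg:1;
 *
 * Other users can pass a different func/sep pair to generate e.g. debug
 * strings from the same flag list.
 */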

enum i915_cache_level {
	I915_CACHE_NONE = 0,
	I915_CACHE_LLC,
	I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
};

typedef uint32_t gen6_gtt_pte_t;

/* The Graphics Translation Table is the way in which GEN hardware translates a
 * Graphics Virtual Address into a Physical Address. In addition to the normal
 * collateral associated with any va->pa translations, GEN hardware also has a
 * portion of the GTT which can be mapped by the CPU and remain both coherent
 * and correct (in cases like swizzling). That region is referred to as GMADR
 * in the spec.
 */
struct i915_gtt {
	unsigned long start;		/* Start offset of used GTT */
	size_t total;			/* Total size GTT can map */
	size_t stolen_size;		/* Total size of stolen memory */

	unsigned long mappable_end;	/* End offset that we can CPU map */
	struct io_mapping *mappable;	/* Mapping to our CPU mappable region */
	phys_addr_t mappable_base;	/* PA of our GMADR */

	/** "Graphics Stolen Memory" holds the global PTEs */
	void __iomem *gsm;

	bool do_idle_maps;
	dma_addr_t scratch_page_dma;
	struct page *scratch_page;

	/* global gtt ops */
	int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
			 size_t *stolen, phys_addr_t *mappable_base,
			 unsigned long *mappable_end);
	void (*gtt_remove)(struct drm_device *dev);
	void (*gtt_clear_range)(struct drm_device *dev,
				unsigned int first_entry,
				unsigned int num_entries);
	void (*gtt_insert_entries)(struct drm_device *dev,
				   struct sg_table *st,
				   unsigned int pg_start,
				   enum i915_cache_level cache_level);
	gen6_gtt_pte_t (*pte_encode)(struct drm_device *dev,
				     dma_addr_t addr,
				     enum i915_cache_level level);
};
#define gtt_total_entries(gtt) ((gtt).total >> PAGE_SHIFT)
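
/*
 * Example (illustrative): with a 2 GiB GTT and 4 KiB pages (PAGE_SHIFT
 * == 12), gtt_total_entries() yields 2 GiB >> 12 = 524288 PTEs.
 */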

#define I915_PPGTT_PD_ENTRIES 512
#define I915_PPGTT_PT_ENTRIES 1024
struct i915_hw_ppgtt {
	struct drm_device *dev;
	unsigned num_pd_entries;
	struct page **pt_pages;
	uint32_t pd_offset;
	dma_addr_t *pt_dma_addr;
	dma_addr_t scratch_page_dma_addr;

	/* pte functions, mirroring the interface of the global gtt. */
	void (*clear_range)(struct i915_hw_ppgtt *ppgtt,
			    unsigned int first_entry,
			    unsigned int num_entries);
	void (*insert_entries)(struct i915_hw_ppgtt *ppgtt,
			       struct sg_table *st,
			       unsigned int pg_start,
			       enum i915_cache_level cache_level);
	gen6_gtt_pte_t (*pte_encode)(struct drm_device *dev,
				     dma_addr_t addr,
				     enum i915_cache_level level);
	int (*enable)(struct drm_device *dev);
	void (*cleanup)(struct i915_hw_ppgtt *ppgtt);
};


/* This must match up with the value previously used for execbuf2.rsvd1. */
#define DEFAULT_CONTEXT_ID 0
struct i915_hw_context {
	struct kref ref;
	int id;
	bool is_initialized;
	struct drm_i915_file_private *file_priv;
	struct intel_ring_buffer *ring;
	struct drm_i915_gem_object *obj;
};

enum no_fbc_reason {
	FBC_NO_OUTPUT, /* no outputs enabled to compress */
	FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */
	FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
	FBC_MODE_TOO_LARGE, /* mode too large for compression */
	FBC_BAD_PLANE, /* fbc not supported on plane */
	FBC_NOT_TILED, /* buffer not tiled */
	FBC_MULTIPLE_PIPES, /* more than one pipe active */
	FBC_MODULE_PARAM,
};

enum intel_pch {
	PCH_NONE = 0,	/* No PCH present */
	PCH_IBX,	/* Ibexpeak PCH */
	PCH_CPT,	/* Cougarpoint PCH */
	PCH_LPT,	/* Lynxpoint PCH */
	PCH_NOP,
};

enum intel_sbi_destination {
	SBI_ICLK,
	SBI_MPHY,
};

#define QUIRK_PIPEA_FORCE (1<<0)
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
#define QUIRK_INVERT_BRIGHTNESS (1<<2)

struct intel_fbdev;
struct intel_fbc_work;

struct intel_gmbus {
	struct i2c_adapter adapter;
	u32 force_bit;
	u32 reg0;
	u32 gpio_reg;
	struct i2c_algo_bit_data bit_algo;
	struct drm_i915_private *dev_priv;
};

struct i915_suspend_saved_registers {
	u8 saveLBB;
	u32 saveDSPACNTR;
	u32 saveDSPBCNTR;
	u32 saveDSPARB;
	u32 savePIPEACONF;
	u32 savePIPEBCONF;
	u32 savePIPEASRC;
	u32 savePIPEBSRC;
	u32 saveFPA0;
	u32 saveFPA1;
	u32 saveDPLL_A;
	u32 saveDPLL_A_MD;
	u32 saveHTOTAL_A;
	u32 saveHBLANK_A;
	u32 saveHSYNC_A;
	u32 saveVTOTAL_A;
	u32 saveVBLANK_A;
	u32 saveVSYNC_A;
	u32 saveBCLRPAT_A;
	u32 saveTRANSACONF;
	u32 saveTRANS_HTOTAL_A;
	u32 saveTRANS_HBLANK_A;
	u32 saveTRANS_HSYNC_A;
	u32 saveTRANS_VTOTAL_A;
	u32 saveTRANS_VBLANK_A;
	u32 saveTRANS_VSYNC_A;
	u32 savePIPEASTAT;
	u32 saveDSPASTRIDE;
	u32 saveDSPASIZE;
	u32 saveDSPAPOS;
	u32 saveDSPAADDR;
	u32 saveDSPASURF;
	u32 saveDSPATILEOFF;
	u32 savePFIT_PGM_RATIOS;
	u32 saveBLC_HIST_CTL;
	u32 saveBLC_PWM_CTL;
	u32 saveBLC_PWM_CTL2;
	u32 saveBLC_CPU_PWM_CTL;
	u32 saveBLC_CPU_PWM_CTL2;
	u32 saveFPB0;
	u32 saveFPB1;
	u32 saveDPLL_B;
	u32 saveDPLL_B_MD;
	u32 saveHTOTAL_B;
	u32 saveHBLANK_B;
	u32 saveHSYNC_B;
	u32 saveVTOTAL_B;
	u32 saveVBLANK_B;
	u32 saveVSYNC_B;
	u32 saveBCLRPAT_B;
	u32 saveTRANSBCONF;
	u32 saveTRANS_HTOTAL_B;
	u32 saveTRANS_HBLANK_B;
	u32 saveTRANS_HSYNC_B;
	u32 saveTRANS_VTOTAL_B;
	u32 saveTRANS_VBLANK_B;
	u32 saveTRANS_VSYNC_B;
	u32 savePIPEBSTAT;
	u32 saveDSPBSTRIDE;
	u32 saveDSPBSIZE;
	u32 saveDSPBPOS;
	u32 saveDSPBADDR;
	u32 saveDSPBSURF;
	u32 saveDSPBTILEOFF;
	u32 saveVGA0;
	u32 saveVGA1;
	u32 saveVGA_PD;
	u32 saveVGACNTRL;
	u32 saveADPA;
	u32 saveLVDS;
	u32 savePP_ON_DELAYS;
	u32 savePP_OFF_DELAYS;
	u32 saveDVOA;
	u32 saveDVOB;
	u32 saveDVOC;
	u32 savePP_ON;
	u32 savePP_OFF;
	u32 savePP_CONTROL;
	u32 savePP_DIVISOR;
	u32 savePFIT_CONTROL;
	u32 save_palette_a[256];
	u32 save_palette_b[256];
	u32 saveDPFC_CB_BASE;
	u32 saveFBC_CFB_BASE;
	u32 saveFBC_LL_BASE;
	u32 saveFBC_CONTROL;
	u32 saveFBC_CONTROL2;
	u32 saveIER;
	u32 saveIIR;
	u32 saveIMR;
	u32 saveDEIER;
	u32 saveDEIMR;
	u32 saveGTIER;
	u32 saveGTIMR;
	u32 saveFDI_RXA_IMR;
	u32 saveFDI_RXB_IMR;
	u32 saveCACHE_MODE_0;
	u32 saveMI_ARB_STATE;
	u32 saveSWF0[16];
	u32 saveSWF1[16];
	u32 saveSWF2[3];
	u8 saveMSR;
	u8 saveSR[8];
	u8 saveGR[25];
	u8 saveAR_INDEX;
	u8 saveAR[21];
	u8 saveDACMASK;
	u8 saveCR[37];
	uint64_t saveFENCE[I915_MAX_NUM_FENCES];
	u32 saveCURACNTR;
	u32 saveCURAPOS;
	u32 saveCURABASE;
	u32 saveCURBCNTR;
	u32 saveCURBPOS;
	u32 saveCURBBASE;
	u32 saveCURSIZE;
	u32 saveDP_B;
	u32 saveDP_C;
	u32 saveDP_D;
	u32 savePIPEA_GMCH_DATA_M;
	u32 savePIPEB_GMCH_DATA_M;
	u32 savePIPEA_GMCH_DATA_N;
	u32 savePIPEB_GMCH_DATA_N;
	u32 savePIPEA_DP_LINK_M;
	u32 savePIPEB_DP_LINK_M;
	u32 savePIPEA_DP_LINK_N;
	u32 savePIPEB_DP_LINK_N;
	u32 saveFDI_RXA_CTL;
	u32 saveFDI_TXA_CTL;
	u32 saveFDI_RXB_CTL;
	u32 saveFDI_TXB_CTL;
	u32 savePFA_CTL_1;
	u32 savePFB_CTL_1;
	u32 savePFA_WIN_SZ;
	u32 savePFB_WIN_SZ;
	u32 savePFA_WIN_POS;
	u32 savePFB_WIN_POS;
	u32 savePCH_DREF_CONTROL;
	u32 saveDISP_ARB_CTL;
	u32 savePIPEA_DATA_M1;
	u32 savePIPEA_DATA_N1;
	u32 savePIPEA_LINK_M1;
	u32 savePIPEA_LINK_N1;
	u32 savePIPEB_DATA_M1;
	u32 savePIPEB_DATA_N1;
	u32 savePIPEB_LINK_M1;
	u32 savePIPEB_LINK_N1;
	u32 saveMCHBAR_RENDER_STANDBY;
	u32 savePCH_PORT_HOTPLUG;
};

struct intel_gen6_power_mgmt {
	struct work_struct work;
	struct delayed_work vlv_work;
	u32 pm_iir;
	/* lock - irqsave spinlock that protects the work_struct and
	 * pm_iir. */
	spinlock_t lock;

	/* The below variables and all the rps hw state are protected by
	 * dev->struct_mutex. */
	u8 cur_delay;
	u8 min_delay;
	u8 max_delay;
	u8 rpe_delay;
	u8 hw_max;

	struct delayed_work delayed_resume_work;

	/*
	 * Protects RPS/RC6 register access and PCU communication.
	 * Must be taken after struct_mutex if nested.
	 */
	struct mutex hw_lock;
};

/* defined in intel_pm.c */
extern spinlock_t mchdev_lock;

struct intel_ilk_power_mgmt {
	u8 cur_delay;
	u8 min_delay;
	u8 max_delay;
	u8 fmax;
	u8 fstart;

	u64 last_count1;
	unsigned long last_time1;
	unsigned long chipset_power;
	u64 last_count2;
	struct timespec last_time2;
	unsigned long gfx_power;
	u8 corr;

	int c_m;
	int r_t;

	struct drm_i915_gem_object *pwrctx;
	struct drm_i915_gem_object *renderctx;
};

struct i915_dri1_state {
	unsigned allow_batchbuffer : 1;
	u32 __iomem *gfx_hws_cpu_addr;

	unsigned int cpp;
	int back_offset;
	int front_offset;
	int current_page;
	int page_flipping;

	uint32_t counter;
};

struct intel_l3_parity {
	u32 *remap_info;
	struct work_struct error_work;
};

struct i915_gem_mm {
	/** Memory allocator for GTT stolen memory */
	struct drm_mm stolen;
	/** Memory allocator for GTT */
	struct drm_mm gtt_space;
	/** List of all objects in gtt_space. Used to restore gtt
	 *  mappings on resume */
	struct list_head bound_list;
	/**
	 * List of objects which are not bound to the GTT (thus
	 * are idle and not used by the GPU) but still have
	 * (presumably uncached) pages attached.
	 */
	struct list_head unbound_list;

	/** Usable portion of the GTT for GEM */
	unsigned long stolen_base; /* limited to low memory (32-bit) */

	int gtt_mtrr;

	/** PPGTT used for aliasing the PPGTT with the GTT */
	struct i915_hw_ppgtt *aliasing_ppgtt;

	struct shrinker inactive_shrinker;
	bool shrinker_no_lock_stealing;

	/**
	 * List of objects currently involved in rendering.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * LRU list of objects which are not in the ringbuffer and
	 * are ready to unbind, but are still in the GTT.
	 *
	 * last_rendering_seqno is 0 while an object is in this list.
	 *
	 * A reference is not held on the buffer while on this list,
	 * as merely being GTT-bound shouldn't prevent its being
	 * freed, and we'll pull it off the list in the free path.
	 */
	struct list_head inactive_list;

	/** LRU list of objects with fence regs on them. */
	struct list_head fence_list;

	/**
	 * We leave the user IRQ off as much as possible,
	 * but this means that requests will finish and never
	 * be retired once the system goes idle. Set a timer to
	 * fire periodically while the ring is running. When it
	 * fires, go retire requests.
	 */
	struct delayed_work retire_work;

	/**
	 * Are we in a non-interruptible section of code like
	 * modesetting?
	 */
	bool interruptible;

	/**
	 * Flag if the X Server, and thus DRM, is not currently in
	 * control of the device.
	 *
	 * This is set between LeaveVT and EnterVT. It needs to be
	 * replaced with a semaphore. It also needs to be
	 * transitioned away from for kernel modesetting.
	 */
	int suspended;

	/** Bit 6 swizzling required for X tiling */
	uint32_t bit_6_swizzle_x;
	/** Bit 6 swizzling required for Y tiling */
	uint32_t bit_6_swizzle_y;

	/* storage for physical objects */
	struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];

	/* accounting, useful for userland debugging */
	size_t object_memory;
	u32 object_count;
};

struct i915_gpu_error {
	/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
	struct timer_list hangcheck_timer;
	int hangcheck_count;
	uint32_t last_acthd[I915_NUM_RINGS];
	uint32_t prev_instdone[I915_NUM_INSTDONE_REG];

	/* For reset and error_state handling. */
	spinlock_t lock;
	/* Protected by the above dev->gpu_error.lock. */
	struct drm_i915_error_state *first_error;
	struct work_struct work;

	unsigned long last_reset;

	/**
	 * State variable and reset counter controlling the reset flow
	 *
	 * Upper bits are for the reset counter. This counter is used by the
	 * wait_seqno code to notice, race-free, that a reset event happened
	 * and that it needs to restart the entire ioctl (since most likely
	 * the seqno it waited for won't ever signal anytime soon).
	 *
	 * This is important for lock-free wait paths, where no contended lock
	 * naturally enforces the correct ordering between the bail-out of the
	 * waiter and the gpu reset work code.
	 *
	 * Lowest bit controls the reset state machine: Set means a reset is in
	 * progress. This state will (presuming we don't have any bugs) decay
	 * into either unset (successful reset) or the special WEDGED value (hw
	 * terminally sour). All waiters on the reset_queue will be woken when
	 * that happens.
	 */
	atomic_t reset_counter;

	/**
	 * Special values/flags for reset_counter
	 *
	 * Note that the code relies on
	 * 	I915_WEDGED & I915_RESET_IN_PROGRESS_FLAG
	 * being true.
	 */
#define I915_RESET_IN_PROGRESS_FLAG	1
#define I915_WEDGED			0xffffffff

	/**
	 * Waitqueue to signal when the reset has completed. Used by clients
	 * that wait for dev_priv->mm.wedged to settle.
	 */
	wait_queue_head_t reset_queue;

	/* For gpu hang simulation. */
	unsigned int stop_rings;
};

enum modeset_restore {
	MODESET_ON_LID_OPEN,
	MODESET_DONE,
	MODESET_SUSPENDED,
};

struct intel_vbt_data {
	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */

	/* Feature bits */
	unsigned int int_tv_support:1;
	unsigned int lvds_dither:1;
	unsigned int lvds_vbt:1;
	unsigned int int_crt_support:1;
	unsigned int lvds_use_ssc:1;
	unsigned int display_clock_mode:1;
	unsigned int fdi_rx_polarity_inverted:1;
	int lvds_ssc_freq;
	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */

	/* eDP */
	int edp_rate;
	int edp_lanes;
	int edp_preemphasis;
	int edp_vswing;
	bool edp_initialized;
	bool edp_support;
	int edp_bpp;
	struct edp_power_seq edp_pps;

	int crt_ddc_pin;

	int child_dev_num;
	struct child_device_config *child_dev;
};

typedef struct drm_i915_private {
	struct drm_device *dev;
	struct kmem_cache *slab;

	const struct intel_device_info *info;

	int relative_constants_mode;

	void __iomem *regs;

	struct drm_i915_gt_funcs gt;
	/** gt_fifo_count and the subsequent register write are synchronized
	 * with dev->struct_mutex. */
	unsigned gt_fifo_count;
	/** forcewake_count is protected by gt_lock */
	unsigned forcewake_count;
	/** gt_lock is also taken in irq contexts. */
	spinlock_t gt_lock;

	struct intel_gmbus gmbus[GMBUS_NUM_PORTS];


	/** gmbus_mutex protects against concurrent usage of the single hw gmbus
	 * controller on different i2c buses. */
	struct mutex gmbus_mutex;

	/**
	 * Base address of the gmbus and gpio block.
	 */
	uint32_t gpio_mmio_base;

	wait_queue_head_t gmbus_wait_queue;

	struct pci_dev *bridge_dev;
	struct intel_ring_buffer ring[I915_NUM_RINGS];
	uint32_t last_seqno, next_seqno;

	drm_dma_handle_t *status_page_dmah;
	struct resource mch_res;

	atomic_t irq_received;

	/* protects the irq masks */
	spinlock_t irq_lock;

	/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
	struct pm_qos_request pm_qos;

	/* DPIO indirect register protection */
	struct mutex dpio_lock;

	/** Cached value of IMR to avoid reads in updating the bitfield */
	u32 irq_mask;
	u32 gt_irq_mask;

	struct work_struct hotplug_work;
	bool enable_hotplug_processing;
	struct {
		unsigned long hpd_last_jiffies;
		int hpd_cnt;
		enum {
			HPD_ENABLED = 0,
			HPD_DISABLED = 1,
			HPD_MARK_DISABLED = 2
		} hpd_mark;
	} hpd_stats[HPD_NUM_PINS];
	u32 hpd_event_bits;
	struct timer_list hotplug_reenable_timer;

	int num_pch_pll;
	int num_plane;

	unsigned long cfb_size;
	unsigned int cfb_fb;
	enum plane cfb_plane;
	int cfb_y;
	struct intel_fbc_work *fbc_work;

	struct intel_opregion opregion;
	struct intel_vbt_data vbt;

	/* overlay */
	struct intel_overlay *overlay;
	unsigned int sprite_scaling_enabled;

	/* backlight */
	struct {
		int level;
		bool enabled;
		spinlock_t lock; /* bl registers and the above bl fields */
		struct backlight_device *device;
	} backlight;

	/* LVDS info */
	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
	bool no_aux_handshake;

	struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
	int num_fence_regs; /* 8 on pre-965, 16 otherwise */

	unsigned int fsb_freq, mem_freq, is_ddr3;

	struct workqueue_struct *wq;

	/* Display functions */
	struct drm_i915_display_funcs display;

	/* PCH chipset type */
	enum intel_pch pch_type;
	unsigned short pch_id;

	unsigned long quirks;

	enum modeset_restore modeset_restore;
	struct mutex modeset_restore_lock;

	struct i915_gtt gtt;

	struct i915_gem_mm mm;

	/* Kernel Modesetting */

	struct sdvo_device_mapping sdvo_mappings[2];

	struct drm_crtc *plane_to_crtc_mapping[3];
	struct drm_crtc *pipe_to_crtc_mapping[3];
	wait_queue_head_t pending_flip_queue;

	struct intel_pch_pll pch_plls[I915_NUM_PLLS];
	struct intel_ddi_plls ddi_plls;

	/* Reclocking support */
	bool render_reclock_avail;
	bool lvds_downclock_avail;
	/* indicates the reduced downclock for LVDS */
	int lvds_downclock;
	u16 orig_clock;

	bool mchbar_need_disable;

	struct intel_l3_parity l3_parity;

	/* gen6+ rps state */
	struct intel_gen6_power_mgmt rps;

	/* ilk-only ips/rps state. Everything in here is protected by the global
	 * mchdev_lock in intel_pm.c */
	struct intel_ilk_power_mgmt ips;

	enum no_fbc_reason no_fbc_reason;

	struct drm_mm_node *compressed_fb;
	struct drm_mm_node *compressed_llb;

	struct i915_gpu_error gpu_error;

	struct drm_i915_gem_object *vlv_pctx;

	/* list of fbdevs registered on this device */
	struct intel_fbdev *fbdev;

	/*
	 * The console may be contended at resume, but we don't
	 * want it to block on it.
	 */
	struct work_struct console_resume_work;

	struct drm_property *broadcast_rgb_property;
	struct drm_property *force_audio_property;

	bool hw_contexts_disabled;
	uint32_t hw_context_size;

	u32 fdi_rx_config;

	struct i915_suspend_saved_registers regfile;

	/* Old dri1 support infrastructure, beware the dragons ya fools entering
	 * here! */
	struct i915_dri1_state dri1;
} drm_i915_private_t;

/* Iterate over initialised rings */
#define for_each_ring(ring__, dev_priv__, i__) \
	for ((i__) = 0; (i__) < I915_NUM_RINGS; (i__)++) \
		if (((ring__) = &(dev_priv__)->ring[(i__)]), intel_ring_initialized((ring__)))
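
/*
 * Usage sketch (illustrative):
 *
 *	struct intel_ring_buffer *ring;
 *	int i;
 *
 *	for_each_ring(ring, dev_priv, i)
 *		i915_gem_retire_requests_ring(ring);
 *
 * Rings that were never set up are skipped via intel_ring_initialized(),
 * so the loop body only ever sees rings that exist on this GPU.
 */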

enum hdmi_force_audio {
	HDMI_AUDIO_OFF_DVI = -2,	/* no aux data for HDMI-DVI converter */
	HDMI_AUDIO_OFF,			/* force turn off HDMI audio */
	HDMI_AUDIO_AUTO,		/* trust EDID */
	HDMI_AUDIO_ON,			/* force turn on HDMI audio */
};

#define I915_GTT_RESERVED ((struct drm_mm_node *)0x1)

struct drm_i915_gem_object_ops {
	/* Interface between the GEM object and its backing storage.
	 * get_pages() is called once prior to the use of the associated set
	 * of pages, before binding them into the GTT, and put_pages() is
	 * called after we no longer need them. As we expect there to be
	 * associated cost with migrating pages between the backing storage
	 * and making them available for the GPU (e.g. clflush), we may hold
	 * onto the pages after they are no longer referenced by the GPU
	 * in case they may be used again shortly (for example migrating the
	 * pages to a different memory domain within the GTT). put_pages()
	 * will therefore most likely be called when the object itself is
	 * being released or under memory pressure (where we attempt to
	 * reap pages for the shrinker).
	 */
	int (*get_pages)(struct drm_i915_gem_object *);
	void (*put_pages)(struct drm_i915_gem_object *);
};

struct drm_i915_gem_object {
	struct drm_gem_object base;

	const struct drm_i915_gem_object_ops *ops;

	/** Current space allocated to this object in the GTT, if any. */
	struct drm_mm_node *gtt_space;
	/** Stolen memory for this object, instead of being backed by shmem. */
	struct drm_mm_node *stolen;
	struct list_head gtt_list;

	/** This object's place on the active/inactive lists */
	struct list_head ring_list;
	struct list_head mm_list;
	/** This object's place in the batchbuffer or on the eviction list */
	struct list_head exec_list;

	/**
	 * This is set if the object is on the active lists (has pending
	 * rendering and so a non-zero seqno), and is not set if it is on
	 * the inactive (ready to be unbound) list.
	 */
	unsigned int active:1;

	/**
	 * This is set if the object has been written to since last bound
	 * to the GTT
	 */
	unsigned int dirty:1;

	/**
	 * Fence register bits (if any) for this object. Will be set
	 * as needed when mapped into the GTT.
	 * Protected by dev->struct_mutex.
	 */
	signed int fence_reg:I915_MAX_NUM_FENCE_BITS;

	/**
	 * Advice: are the backing pages purgeable?
	 */
	unsigned int madv:2;

	/**
	 * Current tiling mode for the object.
	 */
	unsigned int tiling_mode:2;
	/**
	 * Whether the tiling parameters for the currently associated fence
	 * register have changed. Note that for the purposes of tracking
	 * tiling changes we also treat the unfenced register, the register
	 * slot that the object occupies whilst it executes a fenced
	 * command (such as BLT on gen2/3), as a "fence".
	 */
	unsigned int fence_dirty:1;

	/** How many users have pinned this object in GTT space. The following
	 * users can each hold at most one reference: pwrite/pread, pin_ioctl
	 * (via user_pin_count), execbuffer (objects are not allowed multiple
	 * times for the same batchbuffer), and the framebuffer code. When
	 * switching/pageflipping, the framebuffer code has at most two buffers
	 * pinned per crtc.
	 *
	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
	 * bits with absolutely no headroom. So use 4 bits. */
	unsigned int pin_count:4;
#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf

	/**
	 * Is the object at the current location in the gtt mappable and
	 * fenceable? Used to avoid costly recalculations.
	 */
	unsigned int map_and_fenceable:1;

	/**
	 * Whether the current gtt mapping needs to be mappable (and isn't just
	 * mappable by accident). Track pin and fault separately for a more
	 * accurate mappable working set.
	 */
	unsigned int fault_mappable:1;
	unsigned int pin_mappable:1;

	/*
	 * Is the GPU currently using a fence to access this buffer?
	 */
	unsigned int pending_fenced_gpu_access:1;
	unsigned int fenced_gpu_access:1;

	unsigned int cache_level:2;

	unsigned int has_aliasing_ppgtt_mapping:1;
	unsigned int has_global_gtt_mapping:1;
	unsigned int has_dma_mapping:1;

	struct sg_table *pages;
	int pages_pin_count;

	/* prime dma-buf support */
	void *dma_buf_vmapping;
	int vmapping_count;

	/**
	 * Used for performing relocations during execbuffer insertion.
	 */
	struct hlist_node exec_node;
	unsigned long exec_handle;
	struct drm_i915_gem_exec_object2 *exec_entry;

	/**
	 * Current offset of the object in GTT space.
	 *
	 * This is the same as gtt_space->start
	 */
	uint32_t gtt_offset;

	struct intel_ring_buffer *ring;

	/** Breadcrumb of last rendering to the buffer. */
	uint32_t last_read_seqno;
	uint32_t last_write_seqno;
	/** Breadcrumb of last fenced GPU access to the buffer. */
	uint32_t last_fenced_seqno;

	/** Current tiling stride for the object, if it's tiled. */
	uint32_t stride;

	/** Record of address bit 17 of each page at last unbind. */
	unsigned long *bit_17;

	/** User space pin count and filp owning the pin */
	uint32_t user_pin_count;
	struct drm_file *pin_filp;

	/** for phy allocated objects */
	struct drm_i915_gem_phys_object *phys_obj;
};
#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)

#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)

/**
 * Request queue structure.
 *
 * The request queue allows us to note sequence numbers that have been emitted
 * and may be associated with active buffers to be retired.
 *
 * By keeping this list, we can avoid having to do questionable
 * sequence-number comparisons on buffer last_rendering_seqnos, and associate
 * an emission time with seqnos for tracking how far ahead of the GPU we are.
 */
struct drm_i915_gem_request {
	/** On which ring this request was generated */
	struct intel_ring_buffer *ring;

	/** GEM sequence number associated with this request. */
	uint32_t seqno;

	/** Position in the ringbuffer of the end of the request */
	u32 tail;

	/** Context related to this request */
	struct i915_hw_context *ctx;

	/** Time at which this request was emitted, in jiffies. */
	unsigned long emitted_jiffies;

	/** global list entry for this request */
	struct list_head list;

	struct drm_i915_file_private *file_priv;
	/** file_priv list entry for this request */
	struct list_head client_list;
};

struct drm_i915_file_private {
	struct {
		spinlock_t lock;
		struct list_head request_list;
	} mm;
	struct idr context_idr;
};

#define INTEL_INFO(dev)	(((struct drm_i915_private *) (dev)->dev_private)->info)

#define IS_I830(dev)		((dev)->pci_device == 0x3577)
#define IS_845G(dev)		((dev)->pci_device == 0x2562)
#define IS_I85X(dev)		(INTEL_INFO(dev)->is_i85x)
#define IS_I865G(dev)		((dev)->pci_device == 0x2572)
#define IS_I915G(dev)		(INTEL_INFO(dev)->is_i915g)
#define IS_I915GM(dev)		((dev)->pci_device == 0x2592)
#define IS_I945G(dev)		((dev)->pci_device == 0x2772)
#define IS_I945GM(dev)		(INTEL_INFO(dev)->is_i945gm)
#define IS_BROADWATER(dev)	(INTEL_INFO(dev)->is_broadwater)
#define IS_CRESTLINE(dev)	(INTEL_INFO(dev)->is_crestline)
#define IS_GM45(dev)		((dev)->pci_device == 0x2A42)
#define IS_G4X(dev)		(INTEL_INFO(dev)->is_g4x)
#define IS_PINEVIEW_G(dev)	((dev)->pci_device == 0xa001)
#define IS_PINEVIEW_M(dev)	((dev)->pci_device == 0xa011)
#define IS_PINEVIEW(dev)	(INTEL_INFO(dev)->is_pineview)
#define IS_G33(dev)		(INTEL_INFO(dev)->is_g33)
#define IS_IRONLAKE_D(dev)	((dev)->pci_device == 0x0042)
#define IS_IRONLAKE_M(dev)	((dev)->pci_device == 0x0046)
#define IS_IVYBRIDGE(dev)	(INTEL_INFO(dev)->is_ivybridge)
#define IS_IVB_GT1(dev)		((dev)->pci_device == 0x0156 || \
				 (dev)->pci_device == 0x0152 || \
				 (dev)->pci_device == 0x015a)
#define IS_SNB_GT1(dev)		((dev)->pci_device == 0x0102 || \
				 (dev)->pci_device == 0x0106 || \
				 (dev)->pci_device == 0x010A)
#define IS_VALLEYVIEW(dev)	(INTEL_INFO(dev)->is_valleyview)
#define IS_HASWELL(dev)		(INTEL_INFO(dev)->is_haswell)
#define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
#define IS_ULT(dev)		(IS_HASWELL(dev) && \
				 ((dev)->pci_device & 0xFF00) == 0x0A00)

/*
 * The genX designation typically refers to the render engine, so render
 * capability related checks should use IS_GEN, while display and other checks
 * have their own (e.g. HAS_PCH_SPLIT for ILK+ display, IS_foo for particular
 * chips, etc.).
 */
#define IS_GEN2(dev)	(INTEL_INFO(dev)->gen == 2)
#define IS_GEN3(dev)	(INTEL_INFO(dev)->gen == 3)
#define IS_GEN4(dev)	(INTEL_INFO(dev)->gen == 4)
#define IS_GEN5(dev)	(INTEL_INFO(dev)->gen == 5)
#define IS_GEN6(dev)	(INTEL_INFO(dev)->gen == 6)
#define IS_GEN7(dev)	(INTEL_INFO(dev)->gen == 7)

#define HAS_BSD(dev)		(INTEL_INFO(dev)->has_bsd_ring)
#define HAS_BLT(dev)		(INTEL_INFO(dev)->has_blt_ring)
#define HAS_LLC(dev)		(INTEL_INFO(dev)->has_llc)
#define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)

#define HAS_HW_CONTEXTS(dev)	(INTEL_INFO(dev)->gen >= 6)
#define HAS_ALIASING_PPGTT(dev)	(INTEL_INFO(dev)->gen >= 6 && !IS_VALLEYVIEW(dev))

#define HAS_OVERLAY(dev)		(INTEL_INFO(dev)->has_overlay)
#define OVERLAY_NEEDS_PHYSICAL(dev)	(INTEL_INFO(dev)->overlay_needs_physical)

/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev)		(IS_I830(dev) || IS_845G(dev))

/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
 * rows, which changed the alignment requirements and fence programming.
 */
#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
						      IS_I915GM(dev)))
#define SUPPORTS_DIGITAL_OUTPUTS(dev)	(!IS_GEN2(dev) && !IS_PINEVIEW(dev))
#define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_GEN5(dev))
#define SUPPORTS_EDP(dev)		(IS_IRONLAKE_M(dev))
#define SUPPORTS_TV(dev)		(INTEL_INFO(dev)->supports_tv)
#define I915_HAS_HOTPLUG(dev)		(INTEL_INFO(dev)->has_hotplug)
/* dsparb controlled by hw only */
#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))

#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)

#define HAS_PIPE_CONTROL(dev) (INTEL_INFO(dev)->gen >= 5)

#define HAS_DDI(dev)		(INTEL_INFO(dev)->has_ddi)
#define HAS_POWER_WELL(dev)	(IS_HASWELL(dev))
#define HAS_FPGA_DBG_UNCLAIMED(dev)	(INTEL_INFO(dev)->has_fpga_dbg)

#define INTEL_PCH_DEVICE_ID_MASK		0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE		0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE		0x1c00
#define INTEL_PCH_PPT_DEVICE_ID_TYPE		0x1e00
#define INTEL_PCH_LPT_DEVICE_ID_TYPE		0x8c00
#define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE		0x9c00

#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
#define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
#define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)
#define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE)

#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)

#define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))

#define GT_FREQUENCY_MULTIPLIER 50

#include "i915_trace.h"

/**
 * RC6 is a special power stage which allows the GPU to enter a very
 * low-voltage mode when idle, using down to 0V while at this stage. This
 * stage is entered automatically when the GPU is idle when RC6 support is
 * enabled, and as soon as a new workload arises the GPU wakes up
 * automatically as well.
 *
 * There are different RC6 modes available on Intel GPUs, which differ in
 * the latency required to enter and leave RC6, and in the voltage consumed
 * by the GPU in the different states.
 *
 * The combination of the following flags defines which states the GPU is
 * allowed to enter, while RC6 is the normal RC6 state, RC6p is the deep
 * RC6, and RC6pp is the deepest RC6. Their support by hardware varies
 * according to the GPU, BIOS, chipset and platform. RC6 is usually the
 * safest one and the one which brings the most power savings; deeper
 * states save more power, but require higher latency to switch to and
 * wake up.
 */
#define INTEL_RC6_ENABLE			(1<<0)
#define INTEL_RC6p_ENABLE			(1<<1)
#define INTEL_RC6pp_ENABLE			(1<<2)

extern struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
extern unsigned int i915_fbpercrtc __always_unused;
extern int i915_panel_ignore_lid __read_mostly;
extern unsigned int i915_powersave __read_mostly;
extern int i915_semaphores __read_mostly;
extern unsigned int i915_lvds_downclock __read_mostly;
extern int i915_lvds_channel_mode __read_mostly;
extern int i915_panel_use_ssc __read_mostly;
extern int i915_vbt_sdvo_panel_type __read_mostly;
extern int i915_enable_rc6 __read_mostly;
extern int i915_enable_fbc __read_mostly;
extern bool i915_enable_hangcheck __read_mostly;
extern int i915_enable_ppgtt __read_mostly;
extern unsigned int i915_preliminary_hw_support __read_mostly;
extern int i915_disable_power_well __read_mostly;

extern int i915_suspend(struct drm_device *dev, pm_message_t state);
extern int i915_resume(struct drm_device *dev);
extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master);

/* i915_dma.c */
void i915_update_dri1_breadcrumb(struct drm_device *dev);
extern void i915_kernel_lost_context(struct drm_device *dev);
extern int i915_driver_load(struct drm_device *, unsigned long flags);
extern int i915_driver_unload(struct drm_device *);
extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv);
extern void i915_driver_lastclose(struct drm_device *dev);
extern void i915_driver_preclose(struct drm_device *dev,
				 struct drm_file *file_priv);
extern void i915_driver_postclose(struct drm_device *dev,
				  struct drm_file *file_priv);
extern int i915_driver_device_is_agp(struct drm_device *dev);
#ifdef CONFIG_COMPAT
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
			      unsigned long arg);
#endif
extern int i915_emit_box(struct drm_device *dev,
			 struct drm_clip_rect *box,
			 int DR1, int DR4);
extern int intel_gpu_reset(struct drm_device *dev);
extern int i915_reset(struct drm_device *dev);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);

extern void intel_console_resume(struct work_struct *work);

/* i915_irq.c */
void i915_hangcheck_elapsed(unsigned long data);
void i915_handle_error(struct drm_device *dev, bool wedged);

extern void intel_irq_init(struct drm_device *dev);
extern void intel_hpd_init(struct drm_device *dev);
extern void intel_gt_init(struct drm_device *dev);
extern void intel_gt_reset(struct drm_device *dev);

void i915_error_state_free(struct kref *error_ref);

void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);

#ifdef CONFIG_DEBUG_FS
extern void i915_destroy_error_state(struct drm_device *dev);
#else
#define i915_destroy_error_state(x)
#endif


/* i915_gem.c */
int i915_gem_init_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_create_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);
int i915_gem_pread_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv);
int i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv);
int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv);
int i915_gem_execbuffer(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_execbuffer2(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv);
int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file_priv);
int i915_gem_busy_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file);
int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file);
int i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file_priv);
int i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv);
int i915_gem_set_tiling(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_get_tiling(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
void *i915_gem_object_alloc(struct drm_device *dev);
void i915_gem_object_free(struct drm_i915_gem_object *obj);
int i915_gem_init_object(struct drm_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops);
struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
						  size_t size);
void i915_gem_free_object(struct drm_gem_object *obj);

int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
				     uint32_t alignment,
				     bool map_and_fenceable,
				     bool nonblocking);
void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);

int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
{
	struct sg_page_iter sg_iter;

	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, n)
		return sg_page_iter_page(&sg_iter);

	return NULL;
}
static inline void i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
{
	BUG_ON(obj->pages == NULL);
	obj->pages_pin_count++;
}
static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
{
	BUG_ON(obj->pages_pin_count == 0);
	obj->pages_pin_count--;
}
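
/*
 * The pin count guards against the shrinker releasing obj->pages while
 * they are in use. A typical access pattern looks roughly like (sketch):
 *
 *	ret = i915_gem_object_get_pages(obj);
 *	if (ret)
 *		return ret;
 *	i915_gem_object_pin_pages(obj);
 *	... read/write obj->pages ...
 *	i915_gem_object_unpin_pages(obj);
 */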

int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
int i915_gem_object_sync(struct drm_i915_gem_object *obj,
			 struct intel_ring_buffer *to);
void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
				    struct intel_ring_buffer *ring);

int i915_gem_dumb_create(struct drm_file *file_priv,
			 struct drm_device *dev,
			 struct drm_mode_create_dumb *args);
int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
		      uint32_t handle, uint64_t *offset);
int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
			  uint32_t handle);
/**
 * Returns true if seq1 is later than seq2.
 */
static inline bool
i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}
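
/*
 * The signed-difference trick keeps the comparison correct across u32
 * wrap-around, e.g. (illustrative):
 *
 *	i915_seqno_passed(0x00000001, 0xfffffffe) == true
 *
 * since (int32_t)(0x00000001 - 0xfffffffe) == 3 >= 0, even though the
 * raw values compare the other way.
 */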

int __must_check i915_gem_get_seqno(struct drm_device *dev, u32 *seqno);
int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);

static inline bool
i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
{
	if (obj->fence_reg != I915_FENCE_REG_NONE) {
		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
		dev_priv->fence_regs[obj->fence_reg].pin_count++;
		return true;
	} else
		return false;
}

static inline void
i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
{
	if (obj->fence_reg != I915_FENCE_REG_NONE) {
		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
		dev_priv->fence_regs[obj->fence_reg].pin_count--;
	}
}
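
/*
 * As with page pinning, pin/unpin calls must be balanced; pinning only
 * succeeds (returns true) when a fence register is actually assigned,
 * so callers typically do (sketch):
 *
 *	if (i915_gem_object_pin_fence(obj)) {
 *		... issue fenced GPU commands ...
 *		i915_gem_object_unpin_fence(obj);
 *	}
 */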

void i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
				      bool interruptible);
static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
{
	return unlikely(atomic_read(&error->reset_counter)
			& I915_RESET_IN_PROGRESS_FLAG);
}

static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
{
	return atomic_read(&error->reset_counter) == I915_WEDGED;
}
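
/*
 * Lock-free waiters sample reset_counter before sleeping and use the
 * helpers above to bail out, roughly along these lines (sketch):
 *
 *	if (i915_reset_in_progress(&dev_priv->gpu_error))
 *		return -EAGAIN;		// restart the ioctl after reset
 *	if (i915_terminally_wedged(&dev_priv->gpu_error))
 *		return -EIO;
 */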

void i915_gem_reset(struct drm_device *dev);
void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
                                            uint32_t read_domains,
                                            uint32_t write_domain);
int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
int __must_check i915_gem_init(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
void i915_gem_l3_remap(struct drm_device *dev);
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int __must_check i915_gpu_idle(struct drm_device *dev);
int __must_check i915_gem_idle(struct drm_device *dev);
int i915_add_request(struct intel_ring_buffer *ring,
                     struct drm_file *file,
                     u32 *seqno);
int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
                                 uint32_t seqno);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
                                  bool write);
int __must_check
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
int __must_check
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
                                     u32 alignment,
                                     struct intel_ring_buffer *pipelined);
int i915_gem_attach_phys_object(struct drm_device *dev,
                                struct drm_i915_gem_object *obj,
                                int id,
                                int align);
void i915_gem_detach_phys_object(struct drm_device *dev,
                                 struct drm_i915_gem_object *obj);
void i915_gem_free_all_phys_object(struct drm_device *dev);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);

uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode);
uint32_t
i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
                           int tiling_mode, bool fenced);

int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
                                    enum i915_cache_level cache_level);

struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
                                             struct dma_buf *dma_buf);

struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
                                      struct drm_gem_object *gem_obj, int flags);

/* i915_gem_context.c */
void i915_gem_context_init(struct drm_device *dev);
void i915_gem_context_fini(struct drm_device *dev);
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
int i915_switch_context(struct intel_ring_buffer *ring,
                        struct drm_file *file, int to_id);
void i915_gem_context_free(struct kref *ctx_ref);
static inline void i915_gem_context_reference(struct i915_hw_context *ctx)
{
        kref_get(&ctx->ref);
}

static inline void i915_gem_context_unreference(struct i915_hw_context *ctx)
{
        kref_put(&ctx->ref, i915_gem_context_free);
}
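/*
 * Context lifetimes follow the usual kref pattern (illustrative sketch):
 * every i915_gem_context_reference() must be balanced by an
 * i915_gem_context_unreference(), and the final put frees the context
 * through i915_gem_context_free():
 *
 *	i915_gem_context_reference(ctx);
 *	... use ctx ...
 *	i915_gem_context_unreference(ctx);
 */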

int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
                                  struct drm_file *file);
int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
                                   struct drm_file *file);

/* i915_gem_gtt.c */
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
                            struct drm_i915_gem_object *obj,
                            enum i915_cache_level cache_level);
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
                              struct drm_i915_gem_object *obj);

void i915_gem_restore_gtt_mappings(struct drm_device *dev);
int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
                              enum i915_cache_level cache_level);
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
void i915_gem_init_global_gtt(struct drm_device *dev);
void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
                               unsigned long mappable_end, unsigned long end);
int i915_gem_gtt_init(struct drm_device *dev);
static inline void i915_gem_chipset_flush(struct drm_device *dev)
{
        if (INTEL_INFO(dev)->gen < 6)
                intel_gtt_chipset_flush();
}
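/*
 * Illustrative use (a sketch, not a real call site): on pre-gen6 parts CPU
 * writes can linger in the chipset write buffers, so they are flushed
 * explicitly before the GPU is allowed to read the memory; on gen6+ the
 * helper is a no-op at runtime:
 *
 *	memcpy(vaddr, data, len);
 *	i915_gem_chipset_flush(dev);
 */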

/* i915_gem_evict.c */
int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
                                          unsigned alignment,
                                          unsigned cache_level,
                                          bool mappable,
                                          bool nonblock);
int i915_gem_evict_everything(struct drm_device *dev);

/* i915_gem_stolen.c */
int i915_gem_init_stolen(struct drm_device *dev);
int i915_gem_stolen_setup_compression(struct drm_device *dev, int size);
void i915_gem_stolen_cleanup_compression(struct drm_device *dev);
void i915_gem_cleanup_stolen(struct drm_device *dev);
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size);
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
                                               u32 stolen_offset,
                                               u32 gtt_offset,
                                               u32 size);
void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);

/* i915_gem_tiling.c */
static inline bool
i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
        drm_i915_private_t *dev_priv = obj->base.dev->dev_private;

        return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
                obj->tiling_mode != I915_TILING_NONE;
}
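/*
 * Background (hedged summary): with the 9/10/17 swizzle mode, the memory
 * swizzling of a tiled object depends on bit 17 of each page's physical
 * address. A page can come back at a different physical address after
 * being swapped out, so the do/save helpers below record and re-apply the
 * bit-17 state to keep the object's contents consistent.
 */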

void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);

/* i915_gem_debug.c */
void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
                          const char *where, uint32_t mark);
#if WATCH_LISTS
int i915_verify_lists(struct drm_device *dev);
#else
#define i915_verify_lists(dev) 0
#endif
void i915_gem_object_check_coherency(struct drm_i915_gem_object *obj,
                                     int handle);

/* i915_debugfs.c */
int i915_debugfs_init(struct drm_minor *minor);
void i915_debugfs_cleanup(struct drm_minor *minor);

/* i915_suspend.c */
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);

/* i915_ums.c */
void i915_save_display_reg(struct drm_device *dev);
void i915_restore_display_reg(struct drm_device *dev);

/* i915_sysfs.c */
void i915_setup_sysfs(struct drm_device *dev_priv);
void i915_teardown_sysfs(struct drm_device *dev_priv);

/* intel_i2c.c */
extern int intel_setup_gmbus(struct drm_device *dev);
extern void intel_teardown_gmbus(struct drm_device *dev);
static inline bool intel_gmbus_is_port_valid(unsigned port)
{
        return (port >= GMBUS_PORT_SSC && port <= GMBUS_PORT_DPD);
}

extern struct i2c_adapter *intel_gmbus_get_adapter(
                struct drm_i915_private *dev_priv, unsigned port);
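/*
 * Illustrative lookup (a sketch, error handling elided): validate the port
 * before asking for its adapter, e.g. when probing an output:
 *
 *	if (intel_gmbus_is_port_valid(port)) {
 *		struct i2c_adapter *adapter =
 *			intel_gmbus_get_adapter(dev_priv, port);
 *		...
 *	}
 */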
extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
{
        return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
}
extern void intel_i2c_reset(struct drm_device *dev);

/* intel_opregion.c */
extern int intel_opregion_setup(struct drm_device *dev);
#ifdef CONFIG_ACPI
extern void intel_opregion_init(struct drm_device *dev);
extern void intel_opregion_fini(struct drm_device *dev);
extern void intel_opregion_asle_intr(struct drm_device *dev);
#else
static inline void intel_opregion_init(struct drm_device *dev) { return; }
static inline void intel_opregion_fini(struct drm_device *dev) { return; }
static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
#endif

/* intel_acpi.c */
#ifdef CONFIG_ACPI
extern void intel_register_dsm_handler(void);
extern void intel_unregister_dsm_handler(void);
#else
static inline void intel_register_dsm_handler(void) { return; }
static inline void intel_unregister_dsm_handler(void) { return; }
#endif /* CONFIG_ACPI */

/* modesetting */
extern void intel_modeset_init_hw(struct drm_device *dev);
extern void intel_modeset_suspend_hw(struct drm_device *dev);
extern void intel_modeset_init(struct drm_device *dev);
extern void intel_modeset_gem_init(struct drm_device *dev);
extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void intel_modeset_setup_hw_state(struct drm_device *dev,
                                         bool force_restore);
extern void i915_redisable_vga(struct drm_device *dev);
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
extern void intel_init_pch_refclk(struct drm_device *dev);
extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void valleyview_set_rps(struct drm_device *dev, u8 val);
extern int valleyview_rps_max_freq(struct drm_i915_private *dev_priv);
extern int valleyview_rps_min_freq(struct drm_i915_private *dev_priv);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
extern int intel_enable_rc6(const struct drm_device *dev);

extern bool i915_semaphore_is_enabled(struct drm_device *dev);
int i915_reg_read_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file);

/* overlay */
#ifdef CONFIG_DEBUG_FS
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error);

extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
extern void intel_display_print_error_state(struct seq_file *m,
                                            struct drm_device *dev,
                                            struct intel_display_error_state *error);
#endif
/* On SNB platforms, the forcewake bit must be set before reading ring
 * registers, to keep the GT core from powering down and returning stale
 * values.
 */
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
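/*
 * Illustrative pairing (a sketch): forcewake references are counted, so
 * every get must be balanced by a put once the register access is done:
 *
 *	gen6_gt_force_wake_get(dev_priv);
 *	val = I915_READ(reg);
 *	gen6_gt_force_wake_put(dev_priv);
 */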

int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
int valleyview_punit_read(struct drm_i915_private *dev_priv, u8 addr, u32 *val);
int valleyview_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val);
int valleyview_nc_read(struct drm_i915_private *dev_priv, u8 addr, u32 *val);

int vlv_gpu_freq(int ddr_freq, int val);
int vlv_freq_opcode(int ddr_freq, int val);

#define __i915_read(x, y) \
        u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);

__i915_read(8, b)
__i915_read(16, w)
__i915_read(32, l)
__i915_read(64, q)
#undef __i915_read

#define __i915_write(x, y) \
        void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val);

__i915_write(8, b)
__i915_write(16, w)
__i915_write(32, l)
__i915_write(64, q)
#undef __i915_write
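/*
 * The two X-macros above stamp out the typed MMIO accessor prototypes;
 * for example, __i915_read(32, l) expands to:
 *
 *	u32 i915_read32(struct drm_i915_private *dev_priv, u32 reg);
 *
 * The matching definitions elsewhere in the driver also handle tracing
 * and forcewake around the raw register access.
 */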

#define I915_READ8(reg)         i915_read8(dev_priv, (reg))
#define I915_WRITE8(reg, val)   i915_write8(dev_priv, (reg), (val))

#define I915_READ16(reg)        i915_read16(dev_priv, (reg))
#define I915_WRITE16(reg, val)  i915_write16(dev_priv, (reg), (val))
#define I915_READ16_NOTRACE(reg)        readw(dev_priv->regs + (reg))
#define I915_WRITE16_NOTRACE(reg, val)  writew(val, dev_priv->regs + (reg))

#define I915_READ(reg)          i915_read32(dev_priv, (reg))
#define I915_WRITE(reg, val)    i915_write32(dev_priv, (reg), (val))
#define I915_READ_NOTRACE(reg)          readl(dev_priv->regs + (reg))
#define I915_WRITE_NOTRACE(reg, val)    writel(val, dev_priv->regs + (reg))

#define I915_WRITE64(reg, val)  i915_write64(dev_priv, (reg), (val))
#define I915_READ64(reg)        i915_read64(dev_priv, (reg))

#define POSTING_READ(reg)       (void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg)     (void)I915_READ16_NOTRACE(reg)
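/*
 * Typical use of a posting read (sketch): reading the register back forces
 * the preceding write out of any write buffers before the CPU proceeds,
 * and the NOTRACE variant keeps it out of the tracepoint log:
 *
 *	I915_WRITE(reg, val);
 *	POSTING_READ(reg);
 */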

/* "Broadcast RGB" property */
#define INTEL_BROADCAST_RGB_AUTO 0
#define INTEL_BROADCAST_RGB_FULL 1
#define INTEL_BROADCAST_RGB_LIMITED 2

static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
{
        if (HAS_PCH_SPLIT(dev))
                return CPU_VGACNTRL;
        else if (IS_VALLEYVIEW(dev))
                return VLV_VGACNTRL;
        else
                return VGACNTRL;
}

static inline void __user *to_user_ptr(u64 address)
{
        return (void __user *)(uintptr_t)address;
}
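/*
 * Illustrative use (a sketch): ioctl structures carry user pointers as u64
 * so the layout is identical for 32- and 64-bit userspace, and they are
 * converted back at the copy boundary:
 *
 *	if (copy_from_user(dst, to_user_ptr(args->data_ptr), args->size))
 *		return -EFAULT;
 */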

#endif /* _I915_DRV_H_ */