#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20150423"
+#define DRIVER_DATE "20150508"
#undef WARN_ON
/* Many gcc versions seem to not see through this and fall over :( */
#define for_each_crtc(dev, crtc) \
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
+#define for_each_intel_plane(dev, intel_plane) \
+ list_for_each_entry(intel_plane, \
+ &dev->mode_config.plane_list, \
+ base.head)
+
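/*
 * Illustrative use of the new iterator (a sketch, not part of the patch):
 * walk every plane registered with the device. The helper name and debug
 * print are assumptions for demonstration only.
 */
static void sketch_dump_planes(struct drm_device *dev)
{
	struct intel_plane *intel_plane;

	for_each_intel_plane(dev, intel_plane)
		DRM_DEBUG_KMS("plane %u\n", intel_plane->base.base.id);
}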
#define for_each_intel_crtc(dev, intel_crtc) \
list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)
/* skl */
/*
 * DPLL_CTRL1 has 6 bits for each DPLL. We store those in the
- * lower part of crtl1 and they get shifted into position when writing
+ * lower part of ctrl1 and they get shifted into position when writing
 * the register. This allows us to easily compare the state when
 * deciding whether the DPLL can be shared.
*/
uint32_t cfgcr1, cfgcr2;
/* bxt */
- uint32_t ebb0, pll0, pll1, pll2, pll3, pll6, pll8, pcsdw12;
+ uint32_t ebb0, pll0, pll1, pll2, pll3, pll6, pll8, pll10, pcsdw12;
};
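/*
 * Sketch of the scheme described above (an assumed helper, not the
 * driver's actual code): each DPLL owns a 6-bit field in DPLL_CTRL1,
 * so the state kept in the low bits of ctrl1 only needs a shift of
 * 6 * id to land in that DPLL's field when the register is written.
 */
static inline uint32_t sketch_dpll_ctrl1_field(uint32_t ctrl1, int id)
{
	return ctrl1 << (id * 6);	/* shift low bits into position */
}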
struct intel_shared_dpll_config {
struct drm_i915_error_buffer {
u32 size;
u32 name;
- u32 rseqno, wseqno;
+ u32 rseqno[I915_NUM_RINGS], wseqno;
u32 gtt_offset;
u32 read_domains;
u32 write_domain;
#define for_each_fw_domain(domain__, dev_priv__, i__) \
for_each_fw_domain_mask(domain__, FORCEWAKE_ALL, dev_priv__, i__)
+enum csr_state {
+ FW_UNINITIALIZED = 0,
+ FW_LOADED,
+ FW_FAILED
+};
+
+struct intel_csr {
+ const char *fw_path;
+ __be32 *dmc_payload;
+ uint32_t dmc_fw_size;
+ uint32_t mmio_count;
+ uint32_t mmioaddr[8];
+ uint32_t mmiodata[8];
+ enum csr_state state;
+};
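/*
 * Hypothetical sketch of how the captured MMIO address/data pairs might
 * be replayed once the DMC payload is in place; the helper name is
 * invented and the locking/teardown details of the real loader are
 * omitted.
 */
static void sketch_csr_program_mmio(struct drm_i915_private *dev_priv)
{
	struct intel_csr *csr = &dev_priv->csr;
	uint32_t i;

	for (i = 0; i < csr->mmio_count; i++)
		I915_WRITE(csr->mmioaddr[i], csr->mmiodata[i]);

	csr->state = FW_LOADED;
}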
+
#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
func(is_mobile) sep \
func(is_i85x) sep \
/* Display 2 CZ domain */
u32 gu_ctl0;
u32 gu_ctl1;
+ u32 pcbr;
u32 clock_gate_dis2;
};
bool edp_initialized;
bool edp_support;
int edp_bpp;
- bool edp_low_vswing;
struct edp_power_seq edp_pps;
struct {
struct skl_ddb_allocation {
struct skl_ddb_entry pipe[I915_MAX_PIPES];
- struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES];
+ struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* packed/uv */
+ struct skl_ddb_entry y_plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* y-plane */
struct skl_ddb_entry cursor[I915_MAX_PIPES];
};
struct i915_virtual_gpu vgpu;
+ struct intel_csr csr;
+
+ /* Display CSR-related protection */
+ struct mutex csr_lock;
+
struct intel_gmbus gmbus[GMBUS_NUM_PINS];
/** gmbus_mutex protects against concurrent usage of the single hw gmbus
u32 fdi_rx_config;
+ u32 chv_phy_control;
+
u32 suspend_count;
struct i915_suspend_saved_registers regfile;
struct vlv_s0ix_state vlv_s0ix_state;
void (*stop_ring)(struct intel_engine_cs *ring);
} gt;
+ bool edp_low_vswing;
+
/*
* NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch
* will be rejected. Instead look for a better place.
struct drm_mm_node *stolen;
struct list_head global_list;
- struct list_head ring_list;
+ struct list_head ring_list[I915_NUM_RINGS];
/** Used in execbuf to temporarily hold a ref */
struct list_head obj_exec_link;
 * rendering and so a non-zero seqno), and is not set if it is on the
 * inactive (ready to be unbound) list.
*/
- unsigned int active:1;
+ unsigned int active:I915_NUM_RINGS;
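	/*
	 * Note (illustrative): "active" is now a bitmask with one bit per
	 * ring, so activity on ring N is tested as obj->active & (1 << N)
	 * rather than as a single boolean.
	 */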
/**
* This is set if the object has been written to since last bound
void *dma_buf_vmapping;
int vmapping_count;
- /** Breadcrumb of last rendering to the buffer. */
- struct drm_i915_gem_request *last_read_req;
+ /** Breadcrumb of last rendering to the buffer.
+ * There can only be one writer, but we allow for multiple readers.
+ * If there is a writer that necessarily implies that all other
+ * read requests are complete - but we may only be lazily clearing
+ * the read requests. A read request is naturally the most recent
+ * request on a ring, so we may have two different write and read
+ * requests on one ring where the write request is older than the
+ * read request. This allows for the CPU to read from an active
+ * buffer by only waiting for the write to complete.
+	 */
+ struct drm_i915_gem_request *last_read_req[I915_NUM_RINGS];
struct drm_i915_gem_request *last_write_req;
/** Breadcrumb of last fenced GPU access to the buffer. */
struct drm_i915_gem_request *last_fenced_req;
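/*
 * Minimal sketch (assuming the field names above and i915_wait_request
 * as declared below) of how the read/write split can be exploited: a
 * read-only CPU access need only wait for the last write, while a
 * write must drain every outstanding reader.
 */
static int sketch_wait_rendering(struct drm_i915_gem_object *obj,
				 bool readonly)
{
	int i, ret;

	if (readonly)
		return obj->last_write_req ?
			i915_wait_request(obj->last_write_req) : 0;

	for (i = 0; i < I915_NUM_RINGS; i++) {
		if (!obj->last_read_req[i])
			continue;
		ret = i915_wait_request(obj->last_read_req[i]);
		if (ret)
			return ret;
	}
	return 0;
}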
return req ? req->ring : NULL;
}
-static inline void
+static inline struct drm_i915_gem_request *
i915_gem_request_reference(struct drm_i915_gem_request *req)
{
- kref_get(&req->ref);
+ if (req)
+ kref_get(&req->ref);
+ return req;
}
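/*
 * Returning the request (and accepting NULL) enables the one-line
 * take-and-assign idiom; a hypothetical caller:
 *
 *	obj->last_write_req = i915_gem_request_reference(req);
 */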
static inline void
#define SKL_REVID_C0 (0x2)
#define SKL_REVID_D0 (0x3)
#define SKL_REVID_E0 (0x4)
+#define SKL_REVID_F0 (0x5)
#define BXT_REVID_A0 (0x0)
#define BXT_REVID_B0 (0x3)
#define HAS_IPS(dev) (IS_HSW_ULT(dev) || IS_BROADWELL(dev))
+#define HAS_DP_MST(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev) || \
+ INTEL_INFO(dev)->gen >= 9)
+
#define HAS_DDI(dev) (INTEL_INFO(dev)->has_ddi)
#define HAS_FPGA_DBG_UNCLAIMED(dev) (INTEL_INFO(dev)->has_fpga_dbg)
#define HAS_PSR(dev) (IS_HASWELL(dev) || IS_BROADWELL(dev) || \
IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev) || \
IS_SKYLAKE(dev))
#define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \
- IS_BROADWELL(dev) || IS_VALLEYVIEW(dev))
+ IS_BROADWELL(dev) || IS_VALLEYVIEW(dev) || \
+ IS_SKYLAKE(dev))
#define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6)
#define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
+#define HAS_CSR(dev) (IS_SKYLAKE(dev))
+
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
int mmio_debug;
bool verbose_state_checks;
bool nuclear_pageflip;
+ int edp_vswing;
};
extern struct i915_params i915 __read_mostly;
extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
void intel_hpd_cancel_work(struct drm_i915_private *dev_priv);
+void i915_firmware_load_error_print(const char *fw_path, int err);
/* i915_irq.c */
void i915_queue_hangcheck(struct drm_device *dev);
void i915_gem_reset(struct drm_device *dev);
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
-int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
int __must_check i915_gem_init(struct drm_device *dev);
int i915_gem_init_rings(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
int __must_check i915_wait_request(struct drm_i915_gem_request *req);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
int __must_check
+i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
+ bool readonly);
+int __must_check
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
bool write);
int __must_check