#include <uapi/drm/i915_drm.h>
#include <uapi/drm/drm_fourcc.h>
-#include <drm/drmP.h>
-#include "i915_params.h"
-#include "i915_reg.h"
-#include "intel_bios.h"
-#include "intel_ringbuffer.h"
-#include "intel_lrc.h"
-#include "i915_gem_gtt.h"
-#include "i915_gem_render_state.h"
#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
-#include <drm/intel-gtt.h>
-#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
-#include <drm/drm_gem.h>
#include <linux/backlight.h>
#include <linux/hashtable.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
#include <linux/pm_qos.h>
-#include "intel_guc.h"
+#include <linux/shmem_fs.h>
+
+#include <drm/drmP.h>
+#include <drm/intel-gtt.h>
+#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
+#include <drm/drm_gem.h>
+
+#include "i915_params.h"
+#include "i915_reg.h"
+
+#include "intel_bios.h"
#include "intel_dpll_mgr.h"
+#include "intel_guc.h"
+#include "intel_lrc.h"
+#include "intel_ringbuffer.h"
+
+#include "i915_gem.h"
+#include "i915_gem_gtt.h"
+#include "i915_gem_render_state.h"
/* General customization:
*/
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20160411"
+#define DRIVER_DATE "20160425"
#undef WARN_ON
/* Many gcc versions seem to not see through this and fall over :( */
struct i915_ctx_hang_stats hang_stats;
struct i915_hw_ppgtt *ppgtt;
+ /* Unique identifier for this context, used by the hw for tracking */
+ unsigned hw_id;
+
/* Legacy ring buffer submission */
struct {
struct drm_i915_gem_object *rcs_state;
struct i915_vma *lrc_vma;
u64 lrc_desc;
uint32_t *lrc_reg_state;
+ bool initialised; /* ring state initialised for this engine? */
} engine[I915_NUM_ENGINES];
struct list_head link;
/* For missed irq/seqno simulation. */
unsigned int test_irq_rings;
-
- /* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
- bool reload_in_reset;
};
enum modeset_restore {
DECLARE_HASHTABLE(mm_structs, 7);
struct mutex mm_lock;
+ /* The hw wants to have a stable context identifier for the lifetime
+ * of the context (for OA, PASID, faults, etc). This is limited
+ * in execlists to 21 bits.
+ */
+ struct ida context_hw_ida;
+#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
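/* A minimal allocation sketch (not part of this patch): claiming a
 * stable hw_id from context_hw_ida within the 21-bit execlists limit.
 * Only context_hw_ida, MAX_CONTEXT_HW_ID and the kernel IDA API are
 * taken from this header; the surrounding flow is assumed.
 *
 *	ret = ida_simple_get(&dev_priv->context_hw_ida, 0,
 *			     MAX_CONTEXT_HW_ID, GFP_KERNEL);
 *	if (ret < 0)
 *		return ret;
 *	ctx->hw_id = ret;
 *
 * ida_simple_get() returns an id in [0, MAX_CONTEXT_HW_ID), matching
 * the "exclusive" note above; ida_simple_remove() releases it when the
 * context is destroyed.
 */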
+
/* Kernel Modesetting */
struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
struct intel_l3_parity l3_parity;
/* Cannot be determined by PCIID. You must always read a register. */
- size_t ellc_size;
+ u32 edram_cap; /* raw value of the eDRAM capabilities register */
/* gen6+ rps state */
struct intel_gen6_power_mgmt rps;
* crappiness (can't read out DPLL_MD for pipes B & C).
*/
u32 chv_dpll_md[I915_MAX_PIPES];
+ u32 bxt_phy_grc; /* bxt: DPIO PHY GRC calibration value */
u32 suspend_count;
bool suspended_to_idle;
/** On which ring this request was generated */
struct drm_i915_private *i915;
struct intel_engine_cs *engine;
+ unsigned reset_counter; /* sampled at request construction */
/** GEM sequence number associated with the previous request,
* when the HWS breadcrumb is equal to this the GPU is processing
/** Position in the ringbuffer of the end of the whole request */
u32 tail;
+ /** Preallocate space in the ringbuffer for emitting the request */
+ u32 reserved_space;
+
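/* Usage sketch (assumed flow, not part of this patch): the space is
 * reserved at request allocation and released just before the final
 * breadcrumb is emitted, so completing a request can never fail with
 * -ENOSPC. The size constant here is an assumption.
 *
 *	req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
 *	... emit commands, which may wait for ring space ...
 *	req->reserved_space = 0;
 *	__i915_add_request(req, ...);
 */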
/**
* Context and ring buffer related to this request
* Contexts are refcounted, so when this request is associated with a
struct intel_context *ctx;
struct intel_ringbuffer *ringbuf;
+ /**
+ * Context related to the previous request.
+ * As the contexts are accessed by the hardware until the switch is
+ * completed to a new context, the hardware may still be writing
+ * to the context object after the breadcrumb is visible. We must
+ * not unpin/unbind/prune that object whilst still active and so
+ * we keep the previous context pinned until the following (this)
+ * request is retired.
+ */
+ struct intel_context *previous_context;
+
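/* Retirement sketch (hedged): only once the *next* request is retired
 * is it safe to unpin the context it superseded; assuming the execlists
 * unpin helper takes (ctx, engine), the retire path would do roughly:
 *
 *	if (req->previous_context && i915.enable_execlists)
 *		intel_lr_context_unpin(req->previous_context,
 *				       req->engine);
 */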
/** Batch buffer related to this request if any (used for
 * error state dump only) */
struct drm_i915_gem_object *batch_obj;
/** Execlists no. of times this request has been sent to the ELSP */
int elsp_submitted;
+ /** Execlists context hardware id. */
+ unsigned ctx_hw_id;
};
struct drm_i915_gem_request * __must_check
i915_gem_request_alloc(struct intel_engine_cs *engine,
struct intel_context *ctx);
-void i915_gem_request_cancel(struct drm_i915_gem_request *req);
void i915_gem_request_free(struct kref *req_ref);
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
struct drm_file *file);
static inline void
i915_gem_request_unreference(struct drm_i915_gem_request *req)
{
- WARN_ON(!mutex_is_locked(&req->engine->dev->struct_mutex));
kref_put(&req->ref, i915_gem_request_free);
}
-static inline void
-i915_gem_request_unreference__unlocked(struct drm_i915_gem_request *req)
-{
- struct drm_device *dev;
-
- if (!req)
- return;
-
- dev = req->engine->dev;
- if (kref_put_mutex(&req->ref, i915_gem_request_free, &dev->struct_mutex))
- mutex_unlock(&dev->struct_mutex);
-}
-
static inline void i915_gem_request_assign(struct drm_i915_gem_request **pdst,
struct drm_i915_gem_request *src)
{
#define HAS_VEBOX(dev) (INTEL_INFO(dev)->ring_mask & VEBOX_RING)
#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
#define HAS_SNOOP(dev) (INTEL_INFO(dev)->has_snoop)
+#define HAS_EDRAM(dev) (__I915__(dev)->edram_cap & EDRAM_ENABLED)
#define HAS_WT(dev) ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
- __I915__(dev)->ellc_size)
+ HAS_EDRAM(dev))
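/* Usage sketch: eDRAM is now detected from a capability register rather
 * than inferred from the PCIID, so callers test HAS_EDRAM() and query
 * the size separately (intel_uncore_edram_size() is declared below):
 *
 *	if (HAS_EDRAM(dev))
 *		DRM_INFO("Found %lluMB of eDRAM\n",
 *			 intel_uncore_edram_size(dev_priv) >> 20);
 */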
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
/* WaRsDisableCoarsePowerGating:skl,bxt */
#define NEEDS_WaRsDisableCoarsePowerGating(dev) (IS_BXT_REVID(dev, 0, BXT_REVID_A1) || \
- ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && \
- IS_SKL_REVID(dev, 0, SKL_REVID_F0)))
+ IS_SKL_GT3(dev) || \
+ IS_SKL_GT4(dev))
+
/*
* dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
* even when in MSI mode. This results in spurious interrupt warnings if the
#define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \
IS_BROADWELL(dev) || IS_VALLEYVIEW(dev) || \
IS_CHERRYVIEW(dev) || IS_SKYLAKE(dev) || \
- IS_KABYLAKE(dev))
+ IS_KABYLAKE(dev) || IS_BROXTON(dev))
#define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6)
#define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))
enum forcewake_domains domains);
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
enum forcewake_domains domains);
+u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
+
void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
static inline bool intel_vgpu_active(struct drm_device *dev)
{
struct drm_file *file_priv);
void i915_gem_execbuffer_move_to_active(struct list_head *vmas,
struct drm_i915_gem_request *req);
-void i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params);
int i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas);
void i915_gem_object_free(struct drm_i915_gem_object *obj);
void i915_gem_object_init(struct drm_i915_gem_object *obj,
const struct drm_i915_gem_object_ops *ops);
-struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
+struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
size_t size);
struct drm_i915_gem_object *i915_gem_object_create_from_data(
struct drm_device *dev, const void *data, size_t size);
* pages and then returns a contiguous mapping of the backing storage into
* the kernel address space.
*
- * The caller must hold the struct_mutex.
+ * The caller must hold the struct_mutex, and is responsible for calling
+ * i915_gem_object_unpin_map() when the mapping is no longer required.
*
- * Returns the pointer through which to access the backing storage.
+ * Returns the pointer through which to access the mapped object, or an
+ * ERR_PTR() on error.
*/
void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj);
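/* Usage sketch, per the contract above (struct_mutex held throughout):
 *
 *	void *vaddr = i915_gem_object_pin_map(obj);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memcpy(vaddr, data, size);
 *	i915_gem_object_unpin_map(obj);
 */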
bool i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_engine_cs *engine);
-int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
- bool interruptible);
+
+static inline u32 i915_reset_counter(struct i915_gpu_error *error)
+{
+ return atomic_read(&error->reset_counter);
+}
+
+static inline bool __i915_reset_in_progress(u32 reset)
+{
+ return unlikely(reset & I915_RESET_IN_PROGRESS_FLAG);
+}
+
+static inline bool __i915_reset_in_progress_or_wedged(u32 reset)
+{
+ return unlikely(reset & (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
+}
+
+static inline bool __i915_terminally_wedged(u32 reset)
+{
+ return unlikely(reset & I915_WEDGED);
+}
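/* Usage sketch (hedged): waiters sample the counter once, typically the
 * value stashed in request->reset_counter at construction, and test it
 * lock-free instead of serialising against the reset handler:
 *
 *	u32 reset = i915_reset_counter(&dev_priv->gpu_error);
 *	if (__i915_reset_in_progress_or_wedged(reset))
 *		return __i915_terminally_wedged(reset) ? -EIO : -EAGAIN;
 */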
static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
{
- return unlikely(atomic_read(&error->reset_counter)
- & (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
+ return __i915_reset_in_progress(i915_reset_counter(error));
+}
+
+static inline bool i915_reset_in_progress_or_wedged(struct i915_gpu_error *error)
+{
+ return __i915_reset_in_progress_or_wedged(i915_reset_counter(error));
}
static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
{
- return atomic_read(&error->reset_counter) & I915_WEDGED;
+ return __i915_terminally_wedged(i915_reset_counter(error));
}
static inline u32 i915_reset_count(struct i915_gpu_error *error)
{
- return ((atomic_read(&error->reset_counter) & ~I915_WEDGED) + 1) / 2;
+ return ((i915_reset_counter(error) & ~I915_WEDGED) + 1) / 2;
}
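/* Worked example of the encoding: the counter advances once when a
 * reset begins (making it odd, i.e. I915_RESET_IN_PROGRESS_FLAG) and
 * once more when it completes, so with integer division:
 *
 *	counter 0 -> (0 + 1) / 2 = 0 completed resets
 *	counter 1 -> (1 + 1) / 2 = 1 (reset in progress, already counted)
 *	counter 2 -> (2 + 1) / 2 = 1 completed reset
 */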
static inline bool i915_stop_ring_allow_ban(struct drm_i915_private *dev_priv)
int __must_check i915_gem_init(struct drm_device *dev);
int i915_gem_init_engines(struct drm_device *dev);
int __must_check i915_gem_init_hw(struct drm_device *dev);
-int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice);
void i915_gem_init_swizzling(struct drm_device *dev);
void i915_gem_cleanup_engines(struct drm_device *dev);
int __must_check i915_gpu_idle(struct drm_device *dev);
#define i915_add_request_no_flush(req) \
__i915_add_request(req, NULL, false)
int __i915_wait_request(struct drm_i915_gem_request *req,
- unsigned reset_counter,
bool interruptible,
s64 *timeout,
struct intel_rps_client *rps);
bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
struct i915_address_space *vm);
-unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
- struct i915_address_space *vm);
struct i915_vma *
i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm);
return i915_gem_obj_ggtt_bound_view(obj, &i915_ggtt_view_normal);
}
-static inline unsigned long
-i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
-{
- struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
-
- return i915_gem_obj_size(obj, &ggtt->base);
-}
+unsigned long
+i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj);
static inline int __must_check
i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
/* i915_gem_context.c */
int __must_check i915_gem_context_init(struct drm_device *dev);
+void i915_gem_context_lost(struct drm_i915_private *dev_priv);
void i915_gem_context_fini(struct drm_device *dev);
void i915_gem_context_reset(struct drm_device *dev);
int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
-int i915_gem_context_enable(struct drm_i915_gem_request *req);
void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
int i915_switch_context(struct drm_i915_gem_request *req);
struct intel_context *
u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+/* intel_dpio_phy.c */
+void chv_set_phy_signal_level(struct intel_encoder *encoder,
+ u32 deemph_reg_value, u32 margin_reg_value,
+ bool uniq_trans_scale);
+void chv_data_lane_soft_reset(struct intel_encoder *encoder,
+ bool reset);
+void chv_phy_pre_pll_enable(struct intel_encoder *encoder);
+void chv_phy_pre_encoder_enable(struct intel_encoder *encoder);
+void chv_phy_release_cl2_override(struct intel_encoder *encoder);
+void chv_phy_post_pll_disable(struct intel_encoder *encoder);
+
+void vlv_set_phy_signal_level(struct intel_encoder *encoder,
+ u32 demph_reg_value, u32 preemph_reg_value,
+ u32 uniqtranscale_reg_value, u32 tx3_demph);
+
int intel_gpu_freq(struct drm_i915_private *dev_priv, int val);
int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);