drm/i915: Split i915_gem_execbuffer into its own file.
[deliverable/linux.git] / drivers / gpu / drm / i915 / i915_drv.h
index b6b5d08c9956f34944b155459564336cf5b275ee..6c10b645dde9617e27452eeb153c108626d45ba8 100644 (file)
@@ -32,7 +32,6 @@
 
 #include "i915_reg.h"
 #include "intel_bios.h"
-#include "i915_trace.h"
 #include "intel_ringbuffer.h"
 #include <linux/io-mapping.h>
 #include <linux/i2c.h>
@@ -90,7 +89,7 @@ struct drm_i915_gem_phys_object {
        int id;
        struct page **page_list;
        drm_dma_handle_t *handle;
-       struct drm_gem_object *cur_obj;
+       struct drm_i915_gem_object *cur_obj;
 };
 
 struct mem_block {
@@ -125,9 +124,8 @@ struct drm_i915_master_private {
 #define I915_FENCE_REG_NONE -1
 
 struct drm_i915_fence_reg {
-       struct drm_gem_object *obj;
        struct list_head lru_list;
-       bool gpu;
+       struct drm_i915_gem_object *obj;
 };
 
 struct sdvo_device_mapping {
@@ -167,6 +165,7 @@ struct drm_i915_error_state {
        u32 instdone1;
        u32 seqno;
        u64 bbaddr;
+       u64 fence[16];
        struct timeval time;
        struct drm_i915_error_object {
                int page_count;
@@ -275,14 +274,11 @@ typedef struct drm_i915_private {
        uint32_t next_seqno;
 
        drm_dma_handle_t *status_page_dmah;
-       void *seqno_page;
        dma_addr_t dma_status_page;
        uint32_t counter;
-       unsigned int seqno_gfx_addr;
        drm_local_map_t hws_map;
-       struct drm_gem_object *seqno_obj;
-       struct drm_gem_object *pwrctx;
-       struct drm_gem_object *renderctx;
+       struct drm_i915_gem_object *pwrctx;
+       struct drm_i915_gem_object *renderctx;
 
        struct resource mch_res;
 
@@ -318,7 +314,7 @@ typedef struct drm_i915_private {
        int num_pipe;
 
        /* For hangcheck timer */
-#define DRM_I915_HANGCHECK_PERIOD 250 /* in ms */
+#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
        struct timer_list hangcheck_timer;
        int hangcheck_count;
        uint32_t last_acthd;
@@ -646,17 +642,10 @@ typedef struct drm_i915_private {
                struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
 
                /* accounting, useful for userland debugging */
-               size_t object_memory;
-               size_t pin_memory;
-               size_t gtt_memory;
-               size_t gtt_mappable_memory;
-               size_t mappable_gtt_used;
-               size_t mappable_gtt_total;
                size_t gtt_total;
+               size_t mappable_gtt_total;
+               size_t object_memory;
                u32 object_count;
-               u32 pin_count;
-               u32 gtt_mappable_count;
-               u32 gtt_count;
        } mm;
        struct sdvo_device_mapping sdvo_mappings[2];
        /* indicate whether the LVDS_BORDER should be enabled or not */
@@ -690,14 +679,14 @@ typedef struct drm_i915_private {
        u8 fmax;
        u8 fstart;
 
-       u64 last_count1;
-       unsigned long last_time1;
-       u64 last_count2;
-       struct timespec last_time2;
-       unsigned long gfx_power;
-       int c_m;
-       int r_t;
-       u8 corr;
+       u64 last_count1;
+       unsigned long last_time1;
+       u64 last_count2;
+       struct timespec last_time2;
+       unsigned long gfx_power;
+       int c_m;
+       int r_t;
+       u8 corr;
        spinlock_t *mchdev_lock;
 
        enum no_fbc_reason no_fbc_reason;
@@ -711,7 +700,6 @@ typedef struct drm_i915_private {
        struct intel_fbdev *fbdev;
 } drm_i915_private_t;
 
-/** driver private structure attached to each drm_gem_object */
 struct drm_i915_gem_object {
        struct drm_gem_object base;
 
@@ -791,11 +779,20 @@ struct drm_i915_gem_object {
        unsigned int fault_mappable : 1;
        unsigned int pin_mappable : 1;
 
-       /** AGP memory structure for our GTT binding. */
-       DRM_AGP_MEM *agp_mem;
+       /*
+        * Is the GPU currently using a fence to access this buffer?
+        */
+       unsigned int pending_fenced_gpu_access:1;
+       unsigned int fenced_gpu_access:1;
 
        struct page **pages;
 
+       /**
+        * DMAR support
+        */
+       struct scatterlist *sg_list;
+       int num_sg;
+
        /**
         * Current offset of the object in GTT space.
         *
@@ -803,11 +800,13 @@ struct drm_i915_gem_object {
         */
        uint32_t gtt_offset;
 
-       /* Which ring is refering to is this object */
-       struct intel_ring_buffer *ring;
-
        /** Breadcrumb of last rendering to the buffer. */
        uint32_t last_rendering_seqno;
+       struct intel_ring_buffer *ring;
+
+       /** Breadcrumb of last fenced GPU access to the buffer. */
+       uint32_t last_fenced_seqno;
+       struct intel_ring_buffer *last_fenced_ring;
 
        /** Current tiling stride for the object, if it's tiled. */
        uint32_t stride;
@@ -915,7 +914,7 @@ enum intel_chip_family {
 #define HAS_BLT(dev)            (INTEL_INFO(dev)->has_blt_ring)
 #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
 
-#define HAS_OVERLAY(dev)               (INTEL_INFO(dev)->has_overlay)
+#define HAS_OVERLAY(dev)               (INTEL_INFO(dev)->has_overlay)
 #define OVERLAY_NEEDS_PHYSICAL(dev)    (INTEL_INFO(dev)->overlay_needs_physical)
 
 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
@@ -944,6 +943,8 @@ enum intel_chip_family {
 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
 #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
 
+#include "i915_trace.h"
+
 extern struct drm_ioctl_desc i915_ioctls[];
 extern int i915_max_ioctl;
 extern unsigned int i915_fbpercrtc;
@@ -1082,16 +1083,27 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
 void i915_gem_load(struct drm_device *dev);
 int i915_gem_init_object(struct drm_gem_object *obj);
-struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
-                                             size_t size);
+void i915_gem_flush_ring(struct drm_device *dev,
+                        struct intel_ring_buffer *ring,
+                        uint32_t invalidate_domains,
+                        uint32_t flush_domains);
+struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
+                                                 size_t size);
 void i915_gem_free_object(struct drm_gem_object *obj);
-int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment,
-                       bool map_and_fenceable);
-void i915_gem_object_unpin(struct drm_gem_object *obj);
-int i915_gem_object_unbind(struct drm_gem_object *obj);
-void i915_gem_release_mmap(struct drm_gem_object *obj);
+int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
+                                    uint32_t alignment,
+                                    bool map_and_fenceable);
+void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
+void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
 
+int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
+int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
+                                               bool interruptible);
+void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
+                                   struct intel_ring_buffer *ring);
+
 /**
  * Returns true if seq1 is later than seq2.
  */
@@ -1101,70 +1113,88 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
        return (int32_t)(seq1 - seq2) >= 0;
 }
 
-int i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
-                                 bool interruptible);
-int i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
-                                 bool interruptible);
+static inline u32
+i915_gem_next_request_seqno(struct drm_device *dev,
+                           struct intel_ring_buffer *ring)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+       return ring->outstanding_lazy_request = dev_priv->next_seqno;
+}
+
+int __must_check i915_gem_object_get_fence_reg(struct drm_i915_gem_object *obj,
+                                              bool interruptible);
+int __must_check i915_gem_object_put_fence_reg(struct drm_i915_gem_object *obj,
+                                              bool interruptible);
+
 void i915_gem_retire_requests(struct drm_device *dev);
 void i915_gem_reset(struct drm_device *dev);
-void i915_gem_clflush_object(struct drm_gem_object *obj);
-int i915_gem_object_set_domain(struct drm_gem_object *obj,
-                              uint32_t read_domains,
-                              uint32_t write_domain);
-int i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
-                             bool interruptible);
-int i915_gem_init_ringbuffer(struct drm_device *dev);
+void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
+                                           uint32_t read_domains,
+                                           uint32_t write_domain);
+int __must_check i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
+                                          bool interruptible);
+int __must_check i915_gem_init_ringbuffer(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
-int i915_gem_do_init(struct drm_device *dev, unsigned long start,
-                    unsigned long mappable_end, unsigned long end);
-int i915_gpu_idle(struct drm_device *dev);
-int i915_gem_idle(struct drm_device *dev);
-int i915_add_request(struct drm_device *dev,
-                    struct drm_file *file_priv,
-                    struct drm_i915_gem_request *request,
-                    struct intel_ring_buffer *ring);
-int i915_do_wait_request(struct drm_device *dev,
-                        uint32_t seqno,
-                        bool interruptible,
-                        struct intel_ring_buffer *ring);
+void i915_gem_do_init(struct drm_device *dev,
+                     unsigned long start,
+                     unsigned long mappable_end,
+                     unsigned long end);
+int __must_check i915_gpu_idle(struct drm_device *dev);
+int __must_check i915_gem_idle(struct drm_device *dev);
+int __must_check i915_add_request(struct drm_device *dev,
+                                 struct drm_file *file_priv,
+                                 struct drm_i915_gem_request *request,
+                                 struct intel_ring_buffer *ring);
+int __must_check i915_do_wait_request(struct drm_device *dev,
+                                     uint32_t seqno,
+                                     bool interruptible,
+                                     struct intel_ring_buffer *ring);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
-int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
-                                     int write);
-int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
-                                        bool pipelined);
+int __must_check
+i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
+                                 bool write);
+int __must_check
+i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
+                                    struct intel_ring_buffer *pipelined);
 int i915_gem_attach_phys_object(struct drm_device *dev,
-                               struct drm_gem_object *obj,
+                               struct drm_i915_gem_object *obj,
                                int id,
                                int align);
 void i915_gem_detach_phys_object(struct drm_device *dev,
-                                struct drm_gem_object *obj);
+                                struct drm_i915_gem_object *obj);
 void i915_gem_free_all_phys_object(struct drm_device *dev);
-void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
+void i915_gem_release(struct drm_device *dev, struct drm_file *file);
 
 /* i915_gem_gtt.c */
 void i915_gem_restore_gtt_mappings(struct drm_device *dev);
+int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
+void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
 
 /* i915_gem_evict.c */
-int i915_gem_evict_something(struct drm_device *dev, int min_size,
-                            unsigned alignment, bool mappable);
-int i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only);
-int i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only);
+int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
+                                         unsigned alignment, bool mappable);
+int __must_check i915_gem_evict_everything(struct drm_device *dev,
+                                          bool purgeable_only);
+int __must_check i915_gem_evict_inactive(struct drm_device *dev,
+                                        bool purgeable_only);
 
 /* i915_gem_tiling.c */
 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
-void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
-void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj);
+void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
+void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
 
 /* i915_gem_debug.c */
-void i915_gem_dump_object(struct drm_gem_object *obj, int len,
+void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
                          const char *where, uint32_t mark);
 #if WATCH_LISTS
 int i915_verify_lists(struct drm_device *dev);
 #else
 #define i915_verify_lists(dev) 0
 #endif
-void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle);
-void i915_gem_dump_object(struct drm_gem_object *obj, int len,
+void i915_gem_object_check_coherency(struct drm_i915_gem_object *obj,
+                                    int handle);
+void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
                          const char *where, uint32_t mark);
 
 /* i915_debugfs.c */
@@ -1246,10 +1276,10 @@ extern void intel_display_print_error_state(struct seq_file *m,
  * In that case, we don't need to do it when GEM is initialized as nobody else
  * has access to the ring.
  */
-#define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do {                        \
-       if (((drm_i915_private_t *)dev->dev_private)->render_ring.gem_object \
+#define RING_LOCK_TEST_WITH_RETURN(dev, file) do {                     \
+       if (((drm_i915_private_t *)dev->dev_private)->render_ring.obj \
                        == NULL)                                        \
-               LOCK_TEST_WITH_RETURN(dev, file_priv);                  \
+               LOCK_TEST_WITH_RETURN(dev, file);                       \
 } while (0)
 
 
This page took 0.031624 seconds and 5 git commands to generate.