X-Git-Url: http://drtracing.org/?a=blobdiff_plain;f=drivers%2Fgpu%2Fdrm%2Fi915%2Fintel_ringbuffer.h;h=0f12020b54e9e6051897a4d0bfedba00c0a0b752;hb=ccd98fe4996cd22094cde7b6a1f7c569f261b3e9;hp=cb6d3d0b25307909743de9f3653301e3b84ddee7;hpb=ee044a8863de58044cb370c23f97b9b68b33e47b;p=deliverable%2Flinux.git

diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index cb6d3d0b2530..0f12020b54e9 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -194,7 +194,7 @@ struct intel_engine_cs {
 				     bool lazy_coherency);
 	void		(*set_seqno)(struct intel_engine_cs *ring,
 				     u32 seqno);
-	int		(*dispatch_execbuffer)(struct intel_engine_cs *ring,
+	int		(*dispatch_execbuffer)(struct drm_i915_gem_request *req,
 					       u64 offset, u32 length,
 					       unsigned dispatch_flags);
 #define I915_DISPATCH_SECURE 0x1
@@ -252,10 +252,10 @@ struct intel_engine_cs {
 		};
 
 		/* AKA wait() */
-		int	(*sync_to)(struct intel_engine_cs *ring,
-				   struct intel_engine_cs *to,
+		int	(*sync_to)(struct drm_i915_gem_request *to_req,
+				   struct intel_engine_cs *from,
 				   u32 seqno);
-		int	(*signal)(struct intel_engine_cs *signaller,
+		int	(*signal)(struct drm_i915_gem_request *signaller_req,
 				  /* num_dwords needed by caller */
 				  unsigned int num_dwords);
 	} semaphore;
@@ -266,13 +266,11 @@ struct intel_engine_cs {
 	struct list_head execlist_retired_req_list;
 	u8 next_context_status_buffer;
 	u32             irq_keep_mask; /* bitmask for interrupts that should not be masked */
-	int		(*emit_request)(struct intel_ringbuffer *ringbuf,
-					struct drm_i915_gem_request *request);
+	int		(*emit_request)(struct drm_i915_gem_request *request);
 	int		(*emit_flush)(struct drm_i915_gem_request *request,
 				      u32 invalidate_domains,
 				      u32 flush_domains);
-	int		(*emit_bb_start)(struct intel_ringbuffer *ringbuf,
-					 struct intel_context *ctx,
+	int		(*emit_bb_start)(struct drm_i915_gem_request *req,
 					 u64 offset, unsigned dispatch_flags);
 
 	/**
@@ -423,8 +421,8 @@ void intel_cleanup_ring_buffer(struct intel_engine_cs *ring);
 
 int intel_ring_alloc_request_extras(struct drm_i915_gem_request *request);
 
-int __must_check intel_ring_begin(struct intel_engine_cs *ring, int n);
-int __must_check intel_ring_cacheline_align(struct intel_engine_cs *ring);
+int __must_check intel_ring_begin(struct drm_i915_gem_request *req, int n);
+int __must_check intel_ring_cacheline_align(struct drm_i915_gem_request *req);
 static inline void intel_ring_emit(struct intel_engine_cs *ring,
 				   u32 data)
 {
@@ -486,6 +484,7 @@ intel_ring_get_request(struct intel_engine_cs *ring)
  * will always have sufficient room to do its stuff.  The request creation
  * code calls this automatically.
  */
+int intel_ring_reserve_space(struct drm_i915_gem_request *request);
 void intel_ring_reserved_space_reserve(struct intel_ringbuffer *ringbuf, int size);
 /* Cancel the reservation, e.g. because the request is being discarded. */
 void intel_ring_reserved_space_cancel(struct intel_ringbuffer *ringbuf);
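
The common thread of the hunks above is that the engine vfuncs and ring helpers now take a struct drm_i915_gem_request rather than a bare struct intel_engine_cs, so every command emission is tied to an explicit request. Below is a minimal caller-side sketch of what that implies, assuming the request still reaches its engine through a req->ring pointer as in this kernel version; emit_two_noops() is a hypothetical helper written only to illustrate the new calling convention, not code from the patch.

	/* Hypothetical caller: ring space is requested against the request,
	 * while the low-level emit/advance helpers still take the engine.
	 */
	static int emit_two_noops(struct drm_i915_gem_request *req)
	{
		struct intel_engine_cs *ring = req->ring;	/* engine backing the request (assumed field) */
		int ret;

		ret = intel_ring_begin(req, 2);			/* was: intel_ring_begin(ring, 2) */
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_NOOP);
		intel_ring_emit(ring, MI_NOOP);
		intel_ring_advance(ring);

		return 0;
	}

Threading the request through the call chain means the ringbuffer, context and engine can all be derived from one object, which is what lets hooks such as emit_request() and emit_bb_start() drop their extra ringbuf/ctx parameters in this diff.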