Commit | Line | Data |
---|---|---|
8187a2b7 ZN |
1 | #ifndef _INTEL_RINGBUFFER_H_ |
2 | #define _INTEL_RINGBUFFER_H_ | |
3 | ||
4 | struct intel_hw_status_page { | |
78501eac | 5 | u32 __iomem *page_addr; |
8187a2b7 | 6 | unsigned int gfx_addr; |
05394f39 | 7 | struct drm_i915_gem_object *obj; |
8187a2b7 ZN |
8 | }; |
9 | ||
cae5852d ZN |
10 | #define I915_RING_READ(reg) i915_safe_read(dev_priv, reg) |
11 | ||
12 | #define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL(ring->mmio_base)) | |
870e86dd | 13 | #define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL(ring->mmio_base), val) |
cae5852d ZN |
14 | |
15 | #define I915_READ_START(ring) I915_RING_READ(RING_START(ring->mmio_base)) | |
6c0e1c55 | 16 | #define I915_WRITE_START(ring, val) I915_WRITE(RING_START(ring->mmio_base), val) |
cae5852d ZN |
17 | |
18 | #define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD(ring->mmio_base)) | |
570ef608 | 19 | #define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD(ring->mmio_base), val) |
cae5852d ZN |
20 | |
21 | #define I915_READ_CTL(ring) I915_RING_READ(RING_CTL(ring->mmio_base)) | |
7f2ab699 | 22 | #define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL(ring->mmio_base), val) |
870e86dd | 23 | |
8187a2b7 ZN |
24 | struct drm_i915_gem_execbuffer2; |
25 | struct intel_ring_buffer { | |
26 | const char *name; | |
9220434a CW |
27 | enum intel_ring_id { |
28 | RING_RENDER = 0x1, | |
29 | RING_BSD = 0x2, | |
549f7365 | 30 | RING_BLT = 0x4, |
9220434a | 31 | } id; |
333e9fe9 | 32 | u32 mmio_base; |
8187a2b7 ZN |
33 | void *virtual_start; |
34 | struct drm_device *dev; | |
05394f39 | 35 | struct drm_i915_gem_object *obj; |
8187a2b7 ZN |
36 | |
37 | unsigned int head; | |
38 | unsigned int tail; | |
780f0ca3 | 39 | int space; |
c2c347a9 | 40 | int size; |
8187a2b7 ZN |
41 | struct intel_hw_status_page status_page; |
42 | ||
b2223497 CW |
43 | u32 irq_seqno; /* last seq seen at irq time */
44 | u32 waiting_seqno; | |
8187a2b7 | 45 | int user_irq_refcount; |
78501eac CW |
46 | void (*user_irq_get)(struct intel_ring_buffer *ring); |
47 | void (*user_irq_put)(struct intel_ring_buffer *ring); | |
8187a2b7 | 48 | |
78501eac | 49 | int (*init)(struct intel_ring_buffer *ring); |
8187a2b7 | 50 | |
78501eac | 51 | void (*write_tail)(struct intel_ring_buffer *ring, |
297b0c5b | 52 | u32 value); |
78501eac CW |
53 | void (*flush)(struct intel_ring_buffer *ring, |
54 | u32 invalidate_domains, | |
55 | u32 flush_domains); | |
3cce469c CW |
56 | int (*add_request)(struct intel_ring_buffer *ring, |
57 | u32 *seqno); | |
78501eac CW |
58 | u32 (*get_seqno)(struct intel_ring_buffer *ring); |
59 | int (*dispatch_execbuffer)(struct intel_ring_buffer *ring, | |
c4e7a414 | 60 | u32 offset, u32 length); |
8d19215b | 61 | void (*cleanup)(struct intel_ring_buffer *ring); |
8187a2b7 ZN |
62 | |
63 | /** | |
64 | * List of objects currently involved in rendering from the | |
65 | * ringbuffer. | |
66 | * | |
67 | * Includes buffers having the contents of their GPU caches | |
68 | * flushed, not necessarily primitives. last_rendering_seqno | |
69 | * represents when the rendering involved will be completed. | |
70 | * | |
71 | * A reference is held on the buffer while on this list. | |
72 | */ | |
73 | struct list_head active_list; | |
74 | ||
75 | /** | |
76 | * List of breadcrumbs associated with GPU requests currently | |
77 | * outstanding. | |
78 | */ | |
79 | struct list_head request_list; | |
80 | ||
64193406 CW |
81 | /** |
82 | * List of objects currently pending a GPU write flush. | |
83 | * | |
84 | * All elements on this list will belong to either the | |
85 | * active_list or flushing_list, last_rendering_seqno can | |
86 | * be used to differentiate between the two elements. | |
87 | */ | |
88 | struct list_head gpu_write_list; | |
89 | ||
a56ba56c CW |
90 | /** |
91 | * Do we have some not yet emitted requests outstanding? | |
92 | */ | |
5d97eb69 | 93 | u32 outstanding_lazy_request; |
a56ba56c | 94 | |
8187a2b7 ZN |
95 | wait_queue_head_t irq_queue; |
96 | drm_local_map_t map; | |
8d19215b ZN |
97 | |
98 | void *private; | |
8187a2b7 ZN |
99 | }; |
100 | ||
101 | static inline u32 | |
102 | intel_read_status_page(struct intel_ring_buffer *ring, | |
78501eac | 103 | int reg) |
8187a2b7 | 104 | { |
78501eac | 105 | return ioread32(ring->status_page.page_addr + reg); |
8187a2b7 ZN |
106 | } |
107 | ||
78501eac | 108 | void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring); |
e1f99ce6 CW |
109 | int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n); |
110 | int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n); | |
78501eac CW |
111 | |
112 | static inline void intel_ring_emit(struct intel_ring_buffer *ring, | |
113 | u32 data) | |
e898cd22 | 114 | { |
78501eac | 115 | iowrite32(data, ring->virtual_start + ring->tail); |
e898cd22 CW |
116 | ring->tail += 4; |
117 | } | |
118 | ||
78501eac | 119 | void intel_ring_advance(struct intel_ring_buffer *ring); |
8187a2b7 | 120 | |
78501eac | 121 | u32 intel_ring_get_seqno(struct intel_ring_buffer *ring); |
8187a2b7 | 122 | |
5c1143bb XH |
123 | int intel_init_render_ring_buffer(struct drm_device *dev); |
124 | int intel_init_bsd_ring_buffer(struct drm_device *dev); | |
549f7365 | 125 | int intel_init_blt_ring_buffer(struct drm_device *dev); |
8187a2b7 | 126 | |
78501eac CW |
127 | u32 intel_ring_get_active_head(struct intel_ring_buffer *ring); |
128 | void intel_ring_setup_status_page(struct intel_ring_buffer *ring); | |
79f321b7 | 129 | |
8187a2b7 | 130 | #endif /* _INTEL_RINGBUFFER_H_ */ |