#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

struct intel_hw_status_page {
	u32 __iomem *page_addr;
	unsigned int gfx_addr;
	struct drm_i915_gem_object *obj;
};

#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)

#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)

#define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)

#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)

#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)

#define I915_READ_NOPID(ring) I915_READ(RING_NOPID((ring)->mmio_base))
#define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base))
#define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base))
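
/*
 * Illustrative sketch (not a driver entry point): the typical use of the
 * HEAD/TAIL accessors above is computing the free space in the ring,
 * mirroring the driver's internal accounting. HEAD_ADDR masks off the
 * wrap/wait bits of the HEAD register, and the I915_READ() macros assume
 * a dev_priv in scope:
 *
 *	static int example_ring_space(struct intel_ring_buffer *ring)
 *	{
 *		int space = (I915_READ_HEAD(ring) & HEAD_ADDR) -
 *			    (ring->tail + 8);
 *		if (space < 0)
 *			space += ring->size;
 *		return space;
 *	}
 */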

struct intel_ring_buffer {
	const char	*name;
	enum intel_ring_id {
		RCS = 0x0,
		VCS,
		BCS,
	} id;
#define I915_NUM_RINGS 3
	u32		mmio_base;
	void		__iomem *virtual_start;
	struct		drm_device *dev;
	struct		drm_i915_gem_object *obj;

	u32		head;
	u32		tail;
	int		space;
	int		size;
	int		effective_size;
	struct intel_hw_status_page status_page;

	/** We track the position of the requests in the ring buffer, and
	 * when each is retired we increment last_retired_head as the GPU
	 * must have finished processing the request and so we know we
	 * can advance the ringbuffer up to that position.
	 *
	 * last_retired_head is set to -1 after the value is consumed so
	 * we can detect new retirements.
	 */
	u32		last_retired_head;

	u32		irq_refcount;	/* protected by dev_priv->irq_lock */
	u32		irq_enable_mask; /* bitmask to enable ring interrupt */
	u32		trace_irq_seqno;
	u32		sync_seqno[I915_NUM_RINGS-1];
	bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
	void		(*irq_put)(struct intel_ring_buffer *ring);

	int		(*init)(struct intel_ring_buffer *ring);

	void		(*write_tail)(struct intel_ring_buffer *ring,
				      u32 value);
	int __must_check (*flush)(struct intel_ring_buffer *ring,
				  u32 invalidate_domains,
				  u32 flush_domains);
	int		(*add_request)(struct intel_ring_buffer *ring,
				       u32 *seqno);
	u32		(*get_seqno)(struct intel_ring_buffer *ring);
	int		(*dispatch_execbuffer)(struct intel_ring_buffer *ring,
					       u32 offset, u32 length);
	void		(*cleanup)(struct intel_ring_buffer *ring);
	int		(*sync_to)(struct intel_ring_buffer *ring,
				   struct intel_ring_buffer *to,
				   u32 seqno);

	u32		semaphore_register[3]; /* our mbox written by others */
	u32		signal_mbox[2]; /* mboxes this ring signals to */
	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	/**
	 * List of objects currently pending a GPU write flush.
	 *
	 * All elements on this list will belong to either the
	 * active_list or flushing_list; last_rendering_seqno can
	 * be used to differentiate between the two.
	 */
	struct list_head gpu_write_list;

	/**
	 * Do we have some not yet emitted requests outstanding?
	 */
	u32 outstanding_lazy_request;

	wait_queue_head_t irq_queue;
	drm_local_map_t map;

	void *private;
};

static inline unsigned
intel_ring_flag(struct intel_ring_buffer *ring)
{
	return 1 << ring->id;
}
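
/*
 * Illustrative usage (sketch): intel_ring_flag() yields a single-bit mask
 * (bit 0 for RCS, bit 1 for VCS, bit 2 for BCS), intended for accumulating
 * a set of rings, e.g. "mask |= intel_ring_flag(ring);".
 */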

static inline u32
intel_ring_sync_index(struct intel_ring_buffer *ring,
		      struct intel_ring_buffer *other)
{
	int idx;

	/*
	 * rcs -> 0 = vcs, 1 = bcs
	 * vcs -> 0 = bcs, 1 = rcs
	 * bcs -> 0 = rcs, 1 = vcs
	 */

	idx = (other - ring) - 1;
	if (idx < 0)
		idx += I915_NUM_RINGS;

	return idx;
}
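
/*
 * Worked example: the rings live in one contiguous array, so the pointer
 * difference (other - ring) equals the id difference. For ring = RCS
 * waiting on other = BCS, idx = (2 - 0) - 1 = 1, i.e. BCS occupies the
 * second sync_seqno/semaphore slot of the render ring, matching the table
 * above.
 */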

static inline u32
intel_read_status_page(struct intel_ring_buffer *ring,
		       int reg)
{
	return ioread32(ring->status_page.page_addr + reg);
}

/**
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 *
 * The area from dword 0x20 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX	0x20
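
/*
 * Illustrative sketch: the driver's seqno breadcrumbs land at
 * I915_GEM_HWS_INDEX within the driver-usable area, so the last completed
 * seqno can be sampled without touching MMIO:
 *
 *	u32 seqno = intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 */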

void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);

int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
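
/*
 * Waiting for "size - 8" free bytes leaves only the reserved gap between
 * head and tail in use, i.e. the ring is effectively idle: the tail is never
 * allowed to catch the head completely, so 8 bytes always stay unused.
 */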
static inline int intel_wait_ring_idle(struct intel_ring_buffer *ring)
{
	return intel_wait_ring_buffer(ring, ring->size - 8);
}

int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);

static inline void intel_ring_emit(struct intel_ring_buffer *ring,
				   u32 data)
{
	iowrite32(data, ring->virtual_start + ring->tail);
	ring->tail += 4;
}

void intel_ring_advance(struct intel_ring_buffer *ring);
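
/*
 * Illustrative usage (sketch): commands are emitted as a begin/emit/advance
 * sequence. intel_ring_begin() reserves space for n dwords (and may fail),
 * intel_ring_emit() writes one dword at the software tail, and
 * intel_ring_advance() makes the new tail visible to the hardware:
 *
 *	ret = intel_ring_begin(ring, 2);
 *	if (ret)
 *		return ret;
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_advance(ring);
 */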

u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);

int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
void intel_ring_setup_status_page(struct intel_ring_buffer *ring);

static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
{
	return ring->tail;
}

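/*
 * Takes an irq reference on behalf of the tracing machinery the first time
 * around (trace_irq_seqno == 0) and remembers the seqno to release it at;
 * the matching irq_put happens once that seqno has been retired.
 */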
static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
{
	if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
		ring->trace_irq_seqno = seqno;
}

/* DRI warts */
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);

#endif /* _INTEL_RINGBUFFER_H_ */