drm/i915: Only grab correct forcewake for the engine with execlists
[deliverable/linux.git] drivers/gpu/drm/i915/intel_lrc.h
/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef _INTEL_LRC_H_
#define _INTEL_LRC_H_

#define GEN8_LR_CONTEXT_ALIGN 4096

/* Execlists regs */
#define RING_ELSP(ring)                      _MMIO((ring)->mmio_base + 0x230)
#define RING_EXECLIST_STATUS_LO(ring)        _MMIO((ring)->mmio_base + 0x234)
#define RING_EXECLIST_STATUS_HI(ring)        _MMIO((ring)->mmio_base + 0x234 + 4)
#define RING_CONTEXT_CONTROL(ring)           _MMIO((ring)->mmio_base + 0x244)
#define   CTX_CTRL_INHIBIT_SYN_CTX_SWITCH      (1 << 3)
#define   CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT  (1 << 0)
#define   CTX_CTRL_RS_CTX_ENABLE               (1 << 1)
#define RING_CONTEXT_STATUS_BUF_BASE(ring)   _MMIO((ring)->mmio_base + 0x370)
#define RING_CONTEXT_STATUS_BUF_LO(ring, i)  _MMIO((ring)->mmio_base + 0x370 + (i) * 8)
#define RING_CONTEXT_STATUS_BUF_HI(ring, i)  _MMIO((ring)->mmio_base + 0x370 + (i) * 8 + 4)
#define RING_CONTEXT_STATUS_PTR(ring)        _MMIO((ring)->mmio_base + 0x3a0)

/* The docs specify that the write pointer wraps around after 5h: "After status
 * is written out to the last available status QW at offset 5h, this pointer
 * wraps to 0."
 *
 * Therefore, one must infer that even though there are 3 bits available,
 * values 6 and 7 appear to be reserved.
 */
#define GEN8_CSB_ENTRIES 6
#define GEN8_CSB_PTR_MASK 0x7
#define GEN8_CSB_READ_PTR_MASK (GEN8_CSB_PTR_MASK << 8)
#define GEN8_CSB_WRITE_PTR_MASK (GEN8_CSB_PTR_MASK << 0)
#define GEN8_CSB_WRITE_PTR(csb_status) \
	(((csb_status) & GEN8_CSB_WRITE_PTR_MASK) >> 0)
#define GEN8_CSB_READ_PTR(csb_status) \
	(((csb_status) & GEN8_CSB_READ_PTR_MASK) >> 8)
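
/*
 * Illustrative sketch (not part of the original header): how the macros above
 * are meant to be combined when draining the context status buffer (CSB).
 * When the write pointer has wrapped behind the read pointer, it is bumped by
 * GEN8_CSB_ENTRIES before walking the entries. The local names (ptr_reg,
 * read_ptr, write_ptr, status) are hypothetical, and the mmio accessor is
 * assumed to be the driver's I915_READ_FW() helper; the real consumer of
 * these macros lives in intel_lrc.c.
 *
 *	u32 ptr_reg = I915_READ_FW(RING_CONTEXT_STATUS_PTR(engine));
 *	u32 read_ptr = GEN8_CSB_READ_PTR(ptr_reg);
 *	u32 write_ptr = GEN8_CSB_WRITE_PTR(ptr_reg);
 *	u32 status;
 *
 *	if (read_ptr > write_ptr)
 *		write_ptr += GEN8_CSB_ENTRIES;
 *
 *	while (read_ptr < write_ptr) {
 *		read_ptr++;
 *		status = I915_READ_FW(RING_CONTEXT_STATUS_BUF_LO(engine,
 *				read_ptr % GEN8_CSB_ENTRIES));
 *		...
 *	}
 */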

/* Logical Rings */
int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request);
int intel_logical_ring_reserve_space(struct drm_i915_gem_request *request);
void intel_logical_ring_stop(struct intel_engine_cs *engine);
void intel_logical_ring_cleanup(struct intel_engine_cs *engine);
int intel_logical_rings_init(struct drm_device *dev);
int intel_logical_ring_begin(struct drm_i915_gem_request *req, int num_dwords);

int logical_ring_flush_all_caches(struct drm_i915_gem_request *req);

/**
 * intel_logical_ring_advance() - advance the ringbuffer tail
 * @ringbuf: Ringbuffer to advance.
 *
 * The tail is only updated in our logical ringbuffer struct.
 */
static inline void intel_logical_ring_advance(struct intel_ringbuffer *ringbuf)
{
	ringbuf->tail &= ringbuf->size - 1;
}

/**
 * intel_logical_ring_emit() - write a DWORD to the ringbuffer.
 * @ringbuf: Ringbuffer to write to.
 * @data: DWORD to write.
 */
static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
					   u32 data)
{
	iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
	ringbuf->tail += 4;
}

static inline void intel_logical_ring_emit_reg(struct intel_ringbuffer *ringbuf,
					       i915_reg_t reg)
{
	intel_logical_ring_emit(ringbuf, i915_mmio_reg_offset(reg));
}
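
/*
 * Illustrative usage sketch (not part of the original header): a caller is
 * expected to reserve ring space first and then pair its emits with a final
 * advance. "req" is a hypothetical struct drm_i915_gem_request, and MI_NOOP
 * comes from i915_reg.h.
 *
 *	ret = intel_logical_ring_begin(req, 2);
 *	if (ret)
 *		return ret;
 *
 *	intel_logical_ring_emit(req->ringbuf, MI_NOOP);
 *	intel_logical_ring_emit(req->ringbuf, MI_NOOP);
 *	intel_logical_ring_advance(req->ringbuf);
 *
 * As the kerneldoc above notes, intel_logical_ring_advance() only updates the
 * software tail; the hardware is not kicked until the request is submitted.
 */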

/* Logical Ring Contexts */

/* One extra page is added before LRC for GuC as shared data */
#define LRC_GUCSHR_PN	(0)
#define LRC_PPHWSP_PN	(LRC_GUCSHR_PN + 1)
#define LRC_STATE_PN	(LRC_PPHWSP_PN + 1)
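
/*
 * Illustrative sketch (not part of the original header): the page numbers
 * above describe the layout of the LRC backing object, so byte offsets into
 * that object can be derived from them, e.g. for the start of the register
 * state page (the variable name is hypothetical):
 *
 *	u32 lrc_state_offset = LRC_STATE_PN * PAGE_SIZE;
 *
 * Page 0 (LRC_GUCSHR_PN) is the page shared with the GuC, and page 1
 * (LRC_PPHWSP_PN) is the per-process hardware status page.
 */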

void intel_lr_context_free(struct intel_context *ctx);
uint32_t intel_lr_context_size(struct intel_engine_cs *engine);
int intel_lr_context_deferred_alloc(struct intel_context *ctx,
				    struct intel_engine_cs *engine);
void intel_lr_context_unpin(struct intel_context *ctx,
			    struct intel_engine_cs *engine);
void intel_lr_context_reset(struct drm_device *dev,
			    struct intel_context *ctx);
uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
				     struct intel_engine_cs *engine);

u32 intel_execlists_ctx_id(struct intel_context *ctx,
			   struct intel_engine_cs *engine);

/* Execlists */
int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
struct i915_execbuffer_params;
int intel_execlists_submission(struct i915_execbuffer_params *params,
			       struct drm_i915_gem_execbuffer2 *args,
			       struct list_head *vmas);

void intel_execlists_retire_requests(struct intel_engine_cs *engine);

#endif /* _INTEL_LRC_H_ */