/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef _INTEL_LRC_H_
#define _INTEL_LRC_H_

/* Execlists regs */
#define RING_ELSP(ring)				((ring)->mmio_base + 0x230)
#define RING_EXECLIST_STATUS(ring)		((ring)->mmio_base + 0x234)
#define RING_CONTEXT_CONTROL(ring)		((ring)->mmio_base + 0x244)
#define RING_CONTEXT_STATUS_BUF(ring)		((ring)->mmio_base + 0x370)
#define RING_CONTEXT_STATUS_PTR(ring)		((ring)->mmio_base + 0x3a0)

/* Logical Rings */
void intel_logical_ring_stop(struct intel_engine_cs *ring);
void intel_logical_ring_cleanup(struct intel_engine_cs *ring);
int intel_logical_rings_init(struct drm_device *dev);

int logical_ring_flush_all_caches(struct intel_ringbuffer *ringbuf);
void intel_logical_ring_advance_and_submit(struct intel_ringbuffer *ringbuf);
/**
 * intel_logical_ring_advance() - advance the ringbuffer tail
 * @ringbuf: Ringbuffer to advance.
 *
 * The tail is only updated in our logical ringbuffer struct.
 */
static inline void intel_logical_ring_advance(struct intel_ringbuffer *ringbuf)
{
	ringbuf->tail &= ringbuf->size - 1;
}
/**
 * intel_logical_ring_emit() - write a DWORD to the ringbuffer.
 * @ringbuf: Ringbuffer to write to.
 * @data: DWORD to write.
 */
static inline void intel_logical_ring_emit(struct intel_ringbuffer *ringbuf,
					   u32 data)
{
	iowrite32(data, ringbuf->virtual_start + ringbuf->tail);
	ringbuf->tail += 4;
}
int intel_logical_ring_begin(struct intel_ringbuffer *ringbuf, int num_dwords);

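/*
 * Illustrative sketch (not part of the original interface): the helpers
 * above are meant to be used together -- reserve space on the ring with
 * intel_logical_ring_begin(), write DWORDs with intel_logical_ring_emit(),
 * then update the software tail with intel_logical_ring_advance(). The
 * helper name below and the use of MI_NOOP (from i915_reg.h, assumed to be
 * included before this header, as i915_drv.h does) are assumptions made
 * purely for illustration.
 */
static inline int intel_logical_ring_emit_noops(struct intel_ringbuffer *ringbuf,
						int num_dwords)
{
	int ret, i;

	/* Make sure there is enough space in the ringbuffer (may wrap). */
	ret = intel_logical_ring_begin(ringbuf, num_dwords);
	if (ret)
		return ret;

	/* Each emit writes one DWORD and bumps the software tail by 4. */
	for (i = 0; i < num_dwords; i++)
		intel_logical_ring_emit(ringbuf, MI_NOOP);

	/* Only updates ringbuf->tail; submission to the hardware happens later. */
	intel_logical_ring_advance(ringbuf);

	return 0;
}
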
/* Logical Ring Contexts */
void intel_lr_context_free(struct intel_context *ctx);
int intel_lr_context_deferred_create(struct intel_context *ctx,
				     struct intel_engine_cs *ring);

/* Execlists */
int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
			       struct intel_engine_cs *ring,
			       struct intel_context *ctx,
			       struct drm_i915_gem_execbuffer2 *args,
			       struct list_head *vmas,
			       struct drm_i915_gem_object *batch_obj,
			       u64 exec_start, u32 flags);
u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj);

/**
 * struct intel_ctx_submit_request - queued context submission request
 * @ctx: Context to submit to the ELSP.
 * @ring: Engine to submit it to.
 * @tail: offset in the context's ringbuffer up to which this request extends.
 * @execlist_link: link in the submission queue.
 * @work: work item used to process this request in a bottom half.
 * @elsp_submitted: number of times this request has been sent to the ELSP.
 *
 * The ELSP only accepts two elements at a time, so we queue context/tail
 * pairs on a per-engine queue (ring->execlist_queue) until the hardware is
 * available. The queue serves a double purpose: we also use it to keep track
 * of the up to two contexts currently in the hardware (usually one executing
 * and the other queued up by the GPU); we only remove elements from the head
 * of the queue when the hardware informs us that an element has been
 * completed.
 *
 * All accesses to the queue are mediated by a spinlock (ring->execlist_lock).
 */
struct intel_ctx_submit_request {
	struct intel_context *ctx;
	struct intel_engine_cs *ring;
	u32 tail;

	struct list_head execlist_link;
	struct work_struct work;

	int elsp_submitted;
};

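/*
 * Illustrative sketch (not part of the original interface): queueing a new
 * context/tail pair while honouring the locking rule documented above. The
 * helper name is made up for illustration; the execlist_queue/execlist_lock
 * members of struct intel_engine_cs are assumed to match the names quoted
 * in the kerneldoc comment, and the irqsave locking flavour is likewise an
 * assumption of this sketch.
 */
static inline void intel_execlists_queue_request(struct intel_engine_cs *ring,
						 struct intel_ctx_submit_request *req)
{
	unsigned long flags;

	/* All accesses to ring->execlist_queue are mediated by this lock. */
	spin_lock_irqsave(&ring->execlist_lock, flags);
	list_add_tail(&req->execlist_link, &ring->execlist_queue);
	spin_unlock_irqrestore(&ring->execlist_lock, flags);
}
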
void intel_execlists_handle_ctx_events(struct intel_engine_cs *ring);

#endif /* _INTEL_LRC_H_ */