drivers/gpu/drm/i915/i915_gem_evict.c
/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include "i915_drv.h"
#include <drm/i915_drm.h>
#include "i915_trace.h"

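/*
 * Helper for the eviction scan below: skip pinned objects, add the VMA
 * to the caller's unwind list and to the drm_mm scan, and return true
 * once drm_mm reports that enough contiguous space has been found.
 */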
static bool
mark_free(struct i915_vma *vma, struct list_head *unwind)
{
        if (vma->obj->pin_count)
                return false;

        if (WARN_ON(!list_empty(&vma->exec_list)))
                return false;

        list_add(&vma->exec_list, unwind);
        return drm_mm_scan_add_block(&vma->node);
}

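/*
 * Scan @vm in LRU order (inactive before active) and evict enough
 * unpinned objects to open a contiguous hole of at least @min_size
 * bytes with the requested alignment and cache level. With @mappable
 * the search is limited to the mappable range of the global GTT; with
 * @nonblocking the GPU is not idled and -ENOSPC is returned instead of
 * retrying after a GPU idle.
 */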
int
i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
                         int min_size, unsigned alignment, unsigned cache_level,
                         bool mappable, bool nonblocking)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct list_head eviction_list, unwind_list;
        struct i915_vma *vma;
        int ret = 0;

        trace_i915_gem_evict(dev, min_size, alignment, mappable);

        /*
         * The goal is to evict objects and amalgamate space in LRU order.
         * The oldest idle objects reside on the inactive list, which is in
         * retirement order. The next objects to retire are those on the (per
         * ring) active list that do not have an outstanding flush. Once the
         * hardware reports completion (the seqno is updated after the
         * batchbuffer has been finished) the clean buffer objects would
         * be retired to the inactive list. Any dirty objects would be added
         * to the tail of the flushing list. So after processing the clean
         * active objects we need to emit a MI_FLUSH to retire the flushing
         * list, hence the retirement order of the flushing list is in
         * advance of the dirty objects on the active lists.
         *
         * The retirement sequence is thus:
         * 1. Inactive objects (already retired)
         * 2. Clean active objects
         * 3. Flushing list
         * 4. Dirty active objects.
         *
         * On each list, the oldest objects lie at the HEAD with the freshest
         * object on the TAIL.
         */

        INIT_LIST_HEAD(&unwind_list);
        if (mappable) {
                BUG_ON(!i915_is_ggtt(vm));
                drm_mm_init_scan_with_range(&vm->mm, min_size,
                                            alignment, cache_level, 0,
                                            dev_priv->gtt.mappable_end);
        } else
                drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);

search_again:
        /* First see if there is a large enough contiguous idle region... */
        list_for_each_entry(vma, &vm->inactive_list, mm_list) {
                if (mark_free(vma, &unwind_list))
                        goto found;
        }

        if (nonblocking)
                goto none;

        /* Now merge in the soon-to-be-expired objects... */
        list_for_each_entry(vma, &vm->active_list, mm_list) {
                if (mark_free(vma, &unwind_list))
                        goto found;
        }

none:
        /* Nothing found, clean up and bail out! */
        while (!list_empty(&unwind_list)) {
                vma = list_first_entry(&unwind_list,
                                       struct i915_vma,
                                       exec_list);
                ret = drm_mm_scan_remove_block(&vma->node);
                BUG_ON(ret);

                list_del_init(&vma->exec_list);
        }

        /* Can we unpin some objects such as idle hw contexts,
         * or pending flips?
         */
        ret = nonblocking ? -ENOSPC : i915_gpu_idle(dev);
        if (ret)
                return ret;

        /* Only idle the GPU and repeat the search once */
        i915_gem_retire_requests(dev);
        nonblocking = true;
        goto search_again;

found:
        /* drm_mm doesn't allow any other operations while scanning,
         * therefore store the to-be-evicted objects on a temporary
         * list. */
        INIT_LIST_HEAD(&eviction_list);
        while (!list_empty(&unwind_list)) {
                vma = list_first_entry(&unwind_list,
                                       struct i915_vma,
                                       exec_list);
                if (drm_mm_scan_remove_block(&vma->node)) {
                        list_move(&vma->exec_list, &eviction_list);
                        drm_gem_object_reference(&vma->obj->base);
                        continue;
                }
                list_del_init(&vma->exec_list);
        }

        /* Unbinding will emit any required flushes */
        while (!list_empty(&eviction_list)) {
                struct drm_gem_object *obj;
                vma = list_first_entry(&eviction_list,
                                       struct i915_vma,
                                       exec_list);

                obj = &vma->obj->base;
                list_del_init(&vma->exec_list);
                if (ret == 0)
                        ret = i915_vma_unbind(vma);

                drm_gem_object_unreference(obj);
        }

        return ret;
}

/**
 * i915_gem_evict_vm - Try to free up VM space
 *
 * @vm: Address space to evict from
 * @do_idle: Boolean directing whether to idle first.
 *
 * VM eviction is about freeing up virtual address space. If one wants
 * fine-grained eviction, see i915_gem_evict_something() for more details. In
 * terms of freeing up actual system memory, this function may not accomplish
 * the desired result. An object may be shared in multiple address spaces, and
 * this function will not guarantee that those objects are freed.
 *
 * Using do_idle will result in a more complete eviction because it retires,
 * and thereby inactivates, the currently active BOs.
 */
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
{
        struct i915_vma *vma, *next;
        int ret;

        trace_i915_gem_evict_vm(vm);

        if (do_idle) {
                ret = i915_gpu_idle(vm->dev);
                if (ret)
                        return ret;

                i915_gem_retire_requests(vm->dev);
        }

        list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
                if (vma->obj->pin_count == 0)
                        WARN_ON(i915_vma_unbind(vma));

        return 0;
}

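/*
 * Evict every unpinned object from every address space: idle the GPU,
 * retire outstanding requests and then unbind whatever remains on the
 * inactive lists via i915_gem_evict_vm(). Returns -ENOSPC when there
 * is nothing to evict.
 */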
int
i915_gem_evict_everything(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct i915_address_space *vm;
        bool lists_empty = true;
        int ret;

        list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
                lists_empty = (list_empty(&vm->inactive_list) &&
                               list_empty(&vm->active_list));
                if (!lists_empty)
                        break;
        }

        if (lists_empty)
                return -ENOSPC;

        trace_i915_gem_evict_everything(dev);

        /* The gpu_idle will flush everything in the write domain to the
         * active list. Then we must move everything off the active list
         * with retire requests.
         */
        ret = i915_gpu_idle(dev);
        if (ret)
                return ret;

        i915_gem_retire_requests(dev);

        /* Having flushed everything, unbind() should never raise an error */
        list_for_each_entry(vm, &dev_priv->vm_list, global_link)
                WARN_ON(i915_gem_evict_vm(vm, false));

        return 0;
}