/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "intel_drv.h"
#include "i915_trace.h"

static bool
mark_free(struct i915_vma *vma, struct list_head *unwind)
{
	if (vma->pin_count)
		return false;

	if (WARN_ON(!list_empty(&vma->exec_list)))
		return false;

	list_add(&vma->exec_list, unwind);
	return drm_mm_scan_add_block(&vma->node);
}

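/*
 * Aside, an illustrative sketch rather than driver code: mark_free() is one
 * half of the drm_mm scan protocol. Every node handed to
 * drm_mm_scan_add_block() must later be handed back via
 * drm_mm_scan_remove_block() before the drm_mm may be modified again,
 * whether or not a hole was found. A scan therefore always follows this
 * shape (names as used in this file):
 *
 *	drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
 *	list_for_each_entry(vma, &vm->inactive_list, mm_list)
 *		if (drm_mm_scan_add_block(&vma->node))
 *			break;		(a suitable hole was found)
 *
 * and on unwind every added node is removed again, with the return value
 * of drm_mm_scan_remove_block() selecting which vmas must be evicted to
 * realise the hole.
 */
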
/**
 * i915_gem_evict_something - Evict vmas to make room for binding a new one
 * @dev: drm_device
 * @vm: address space to evict from
 * @min_size: size of the desired free space
 * @alignment: alignment constraint of the desired free space
 * @cache_level: cache_level for the desired space
 * @start: start (inclusive) of the range from which to evict objects
 * @end: end (exclusive) of the range from which to evict objects
 * @flags: additional flags to control the eviction algorithm
 *
 * This function will try to evict vmas until a free space satisfying the
 * requirements is found. Callers must check first whether any such hole exists
 * already before calling this function.
 *
 * This function is used by the object/vma binding code.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int
i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
			 int min_size, unsigned alignment, unsigned cache_level,
			 unsigned long start, unsigned long end,
			 unsigned flags)
{
	struct list_head eviction_list, unwind_list;
	struct i915_vma *vma;
	int ret = 0;
	int pass = 0;

	trace_i915_gem_evict(dev, min_size, alignment, flags);

	/*
	 * The goal is to evict objects and amalgamate space in LRU order.
	 * The oldest idle objects reside on the inactive list, which is in
	 * retirement order. The next objects to retire are those on the (per
	 * ring) active list that do not have an outstanding flush. Once the
	 * hardware reports completion (the seqno is updated after the
	 * batchbuffer has been finished) the clean buffer objects would
	 * be retired to the inactive list. Any dirty objects would be added
	 * to the tail of the flushing list. So after processing the clean
	 * active objects we need to emit a MI_FLUSH to retire the flushing
	 * list, hence the retirement order of the flushing list is in
	 * advance of the dirty objects on the active lists.
	 *
	 * The retirement sequence is thus:
	 * 1. Inactive objects (already retired)
	 * 2. Clean active objects
	 * 3. Flushing list
	 * 4. Dirty active objects.
	 *
	 * On each list, the oldest objects lie at the HEAD with the freshest
	 * object on the TAIL.
	 */

	INIT_LIST_HEAD(&unwind_list);
	if (start != 0 || end != vm->total) {
		drm_mm_init_scan_with_range(&vm->mm, min_size,
					    alignment, cache_level,
					    start, end);
	} else
		drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);

search_again:
	/* First see if there is a large enough contiguous idle region... */
	list_for_each_entry(vma, &vm->inactive_list, mm_list) {
		if (mark_free(vma, &unwind_list))
			goto found;
	}

	if (flags & PIN_NONBLOCK)
		goto none;

	/* Now merge in the soon-to-be-expired objects... */
	list_for_each_entry(vma, &vm->active_list, mm_list) {
		if (mark_free(vma, &unwind_list))
			goto found;
	}

none:
	/* Nothing found, clean up and bail out! */
	while (!list_empty(&unwind_list)) {
		vma = list_first_entry(&unwind_list,
				       struct i915_vma,
				       exec_list);
		ret = drm_mm_scan_remove_block(&vma->node);
		BUG_ON(ret);

		list_del_init(&vma->exec_list);
	}

	/* Can we unpin some objects such as idle hw contexts,
	 * or pending flips?
	 */
	if (flags & PIN_NONBLOCK)
		return -ENOSPC;

	/* Only idle the GPU and repeat the search once */
	if (pass++ == 0) {
		ret = i915_gpu_idle(dev);
		if (ret)
			return ret;

		i915_gem_retire_requests(dev);
		goto search_again;
	}

	/* If we still have pending pageflip completions, drop
	 * back to userspace to give our workqueues time to
	 * acquire our locks and unpin the old scanouts.
	 */
	return intel_has_pending_fb_unpin(dev) ? -EAGAIN : -ENOSPC;

found:
	/* drm_mm doesn't allow any other operations while
	 * scanning, therefore store to be evicted objects on a
	 * temporary list. */
	INIT_LIST_HEAD(&eviction_list);
	while (!list_empty(&unwind_list)) {
		vma = list_first_entry(&unwind_list,
				       struct i915_vma,
				       exec_list);
		if (drm_mm_scan_remove_block(&vma->node)) {
			list_move(&vma->exec_list, &eviction_list);
			drm_gem_object_reference(&vma->obj->base);
			continue;
		}
		list_del_init(&vma->exec_list);
	}

	/* Unbinding will emit any required flushes */
	while (!list_empty(&eviction_list)) {
		struct drm_gem_object *obj;
		vma = list_first_entry(&eviction_list,
				       struct i915_vma,
				       exec_list);

		obj = &vma->obj->base;
		list_del_init(&vma->exec_list);
		if (ret == 0)
			ret = i915_vma_unbind(vma);

		drm_gem_object_unreference(obj);
	}

	return ret;
}

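/*
 * Aside, an illustrative sketch rather than driver code: callers are
 * expected to probe for a hole first and treat eviction as the fallback,
 * retrying the insertion afterwards. Paraphrasing the binding path (the
 * search_free label and the exact drm_mm flags are assumptions here and may
 * differ between kernel versions):
 *
 * search_free:
 *	ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node,
 *						  size, alignment, cache_level,
 *						  start, end,
 *						  DRM_MM_SEARCH_DEFAULT,
 *						  DRM_MM_CREATE_DEFAULT);
 *	if (ret) {
 *		ret = i915_gem_evict_something(dev, vm, size, alignment,
 *					       cache_level, start, end, flags);
 *		if (ret == 0)
 *			goto search_free;
 *	}
 */
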
/**
 * i915_gem_evict_vm - Evict all idle vmas from a vm
 *
 * @vm: Address space to cleanse
 * @do_idle: Boolean directing whether to idle first.
 *
 * This function evicts all idle vmas from a vm. If all unpinned vmas should be
 * evicted, @do_idle must be set to true.
 *
 * This is used by the execbuf code as a last-ditch effort to defragment the
 * address space.
 *
 * To clarify: This is for freeing up virtual address space, not for freeing
 * memory in e.g. the shrinker.
 */
int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle)
{
	struct i915_vma *vma, *next;
	int ret;

	trace_i915_gem_evict_vm(vm);

	if (do_idle) {
		ret = i915_gpu_idle(vm->dev);
		if (ret)
			return ret;

		i915_gem_retire_requests(vm->dev);
	}

	list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list)
		if (vma->pin_count == 0)
			WARN_ON(i915_vma_unbind(vma));

	return 0;
}

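/*
 * Aside, an illustrative sketch rather than driver code: the execbuf
 * reservation loop is assumed to fall back to a full cleanse of the vm only
 * once per-object eviction has failed to make room, e.g.:
 *
 *	ret = i915_gem_evict_vm(vm, true);	(idle, retire, then unbind all)
 *	if (ret)
 *		return ret;
 */
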
/**
 * i915_gem_evict_everything - Try to evict all objects
 * @dev: Device to evict objects for
 *
 * This function tries to evict all gem objects from all address spaces. Used
 * by the shrinker as a last-ditch effort and for suspend, before releasing the
 * backing storage of all unbound objects.
 */
int
i915_gem_evict_everything(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *vm, *v;
	bool lists_empty = true;
	int ret;

	list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
		lists_empty = (list_empty(&vm->inactive_list) &&
			       list_empty(&vm->active_list));
		if (!lists_empty)
			break;
	}

	if (lists_empty)
		return -ENOSPC;

	trace_i915_gem_evict_everything(dev);

	/* The gpu_idle will flush everything in the write domain to the
	 * active list. Then we must move everything off the active list
	 * with retire requests.
	 */
	ret = i915_gpu_idle(dev);
	if (ret)
		return ret;

	i915_gem_retire_requests(dev);

	/* Having flushed everything, unbind() should never raise an error */
	list_for_each_entry_safe(vm, v, &dev_priv->vm_list, global_link)
		WARN_ON(i915_gem_evict_vm(vm, false));

	return 0;
}
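
/*
 * Aside, an illustrative sketch rather than driver code: a last-ditch
 * caller such as the shrinker, which is assumed to already hold
 * struct_mutex, simply tolerates -ENOSPC here, since that return only
 * means nothing was bound in the first place:
 *
 *	if (i915_gem_evict_everything(dev) == -ENOSPC)
 *		return 0;	(nothing bound, nothing to evict)
 */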