/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include "i915_drv.h"
#include <drm/i915_drm.h>
#include "i915_trace.h"

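/*
 * Mark an object as a candidate for eviction: pinned objects are skipped;
 * otherwise the object is queued on the caller's unwind list (so the scan
 * can be rolled back) and its GTT node is handed to the drm_mm scanner,
 * which reports whether enough space has now been found.
 */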
static bool
mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
{
	if (obj->pin_count)
		return false;

	list_add(&obj->exec_list, unwind);
	return drm_mm_scan_add_block(&obj->gtt_space);
}

int
i915_gem_evict_something(struct drm_device *dev, int min_size,
			 unsigned alignment, unsigned cache_level,
			 bool mappable, bool nonblocking)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct list_head eviction_list, unwind_list;
	struct drm_i915_gem_object *obj;
	int ret = 0;

	trace_i915_gem_evict(dev, min_size, alignment, mappable);

	/*
	 * The goal is to evict objects and amalgamate space in LRU order.
	 * The oldest idle objects reside on the inactive list, which is in
	 * retirement order. The next objects to retire are those on the (per
	 * ring) active list that do not have an outstanding flush. Once the
	 * hardware reports completion (the seqno is updated after the
	 * batchbuffer has been finished) the clean buffer objects would
	 * be retired to the inactive list. Any dirty objects would be added
	 * to the tail of the flushing list. So after processing the clean
	 * active objects we need to emit a MI_FLUSH to retire the flushing
	 * list, hence the retirement order of the flushing list is in
	 * advance of the dirty objects on the active lists.
	 *
	 * The retirement sequence is thus:
	 *    1. Inactive objects (already retired)
	 *    2. Clean active objects
	 *    3. Flushing list
	 *    4. Dirty active objects.
	 *
	 * On each list, the oldest objects lie at the HEAD with the freshest
	 * object on the TAIL.
	 */

	INIT_LIST_HEAD(&unwind_list);
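	/*
	 * A mappable request must fit in the CPU-visible part of the
	 * aperture, so restrict the scan to [0, mappable_end); otherwise
	 * any range of the GTT may be used.
	 */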
	if (mappable)
		drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space,
					    min_size, alignment, cache_level,
					    0, dev_priv->gtt.mappable_end);
	else
		drm_mm_init_scan(&dev_priv->mm.gtt_space,
				 min_size, alignment, cache_level);

	/* First see if there is a large enough contiguous idle region... */
	list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
		if (mark_free(obj, &unwind_list))
			goto found;
	}

	if (nonblocking)
		goto none;

	/* Now merge in the soon-to-be-expired objects... */
	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
		if (mark_free(obj, &unwind_list))
			goto found;
	}

none:
	/* Nothing found, clean up and bail out! */
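	/*
	 * Roll back the scan: every block handed to drm_mm_scan_add_block()
	 * must be removed again before the drm_mm can be used normally.
	 * Walking unwind_list from its head undoes the additions in reverse
	 * order; since no hole was found, none of these blocks should be
	 * flagged for eviction, which is what the BUG_ON below asserts.
	 */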
	while (!list_empty(&unwind_list)) {
		obj = list_first_entry(&unwind_list,
				       struct drm_i915_gem_object,
				       exec_list);

		ret = drm_mm_scan_remove_block(&obj->gtt_space);
		BUG_ON(ret);

		list_del_init(&obj->exec_list);
	}

	/* We expect the caller to unpin, evict all and try again, or give up.
	 * So calling i915_gem_evict_everything() is unnecessary.
	 */
	return -ENOSPC;

found:
	/* drm_mm doesn't allow any other operations while scanning,
	 * therefore store the objects to be evicted on a temporary list. */
	INIT_LIST_HEAD(&eviction_list);
	while (!list_empty(&unwind_list)) {
		obj = list_first_entry(&unwind_list,
				       struct drm_i915_gem_object,
				       exec_list);
		if (drm_mm_scan_remove_block(&obj->gtt_space)) {
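			/*
			 * The scanner picked this block as part of the hole:
			 * keep it on the eviction list and take a reference
			 * so the object stays alive until it is unbound and
			 * released below.
			 */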
			list_move(&obj->exec_list, &eviction_list);
			drm_gem_object_reference(&obj->base);
			continue;
		}
		list_del_init(&obj->exec_list);
	}

	/* Unbinding will emit any required flushes */
	while (!list_empty(&eviction_list)) {
		obj = list_first_entry(&eviction_list,
				       struct drm_i915_gem_object,
				       exec_list);
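		/*
		 * Record only the first unbind error, but keep draining the
		 * list so that every reference taken above is released.
		 */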
		if (ret == 0)
			ret = i915_gem_object_unbind(obj);

		list_del_init(&obj->exec_list);
		drm_gem_object_unreference(&obj->base);
	}

	return ret;
}

int
i915_gem_evict_everything(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj, *next;
	bool lists_empty;
	int ret;

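	/* If both object lists are empty, nothing is bound into the GTT and
	 * there is nothing to evict.
	 */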
	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
		       list_empty(&dev_priv->mm.active_list));
	if (lists_empty)
		return -ENOSPC;

	trace_i915_gem_evict_everything(dev);

	/* The gpu_idle will flush everything in the write domain to the
	 * active list. Then we must move everything off the active list
	 * with retire requests.
	 */
	ret = i915_gpu_idle(dev);
	if (ret)
		return ret;

	i915_gem_retire_requests(dev);

	/* Having flushed everything, unbind() should never raise an error */
	list_for_each_entry_safe(obj, next,
				 &dev_priv->mm.inactive_list, mm_list)
		if (obj->pin_count == 0)
			WARN_ON(i915_gem_object_unbind(obj));

	return 0;
}