/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_trace.h"


int radeon_ttm_init(struct radeon_device *rdev);
void radeon_ttm_fini(struct radeon_device *rdev);
static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions are calling it.
 */

static void radeon_update_memory_usage(struct radeon_bo *bo,
				       unsigned mem_type, int sign)
{
	struct radeon_device *rdev = bo->rdev;
	u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;

	switch (mem_type) {
	case TTM_PL_TT:
		if (sign > 0)
			atomic64_add(size, &rdev->gtt_usage);
		else
			atomic64_sub(size, &rdev->gtt_usage);
		break;
	case TTM_PL_VRAM:
		if (sign > 0)
			atomic64_add(size, &rdev->vram_usage);
		else
			atomic64_sub(size, &rdev->vram_usage);
		break;
	}
}

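/*
 * Example (editor's illustration): a BO move is accounted as a pair of
 * calls, one subtracting the size from the old placement and one adding
 * it to the new, exactly as radeon_bo_move_notify() does below:
 *
 *	radeon_update_memory_usage(rbo, bo->mem.mem_type, -1);
 *	radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
 */
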
static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct radeon_bo *bo;

	bo = container_of(tbo, struct radeon_bo, tbo);

	radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);
	radeon_mn_unregister(bo);

	mutex_lock(&bo->rdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&bo->rdev->gem.mutex);
	radeon_bo_clear_surface_reg(bo);
	WARN_ON(!list_empty(&bo->va));
	drm_gem_object_release(&bo->gem_base);
	kfree(bo);
}

bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &radeon_ttm_bo_destroy)
		return true;
	return false;
}

void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
	u32 c = 0, i;

	rbo->placement.placement = rbo->placements;
	rbo->placement.busy_placement = rbo->placements;
	if (domain & RADEON_GEM_DOMAIN_VRAM)
		rbo->placements[c++].flags = TTM_PL_FLAG_WC |
					     TTM_PL_FLAG_UNCACHED |
					     TTM_PL_FLAG_VRAM;

	if (domain & RADEON_GEM_DOMAIN_GTT) {
		if (rbo->flags & RADEON_GEM_GTT_UC) {
			rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_TT;

		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
			   (rbo->rdev->flags & RADEON_IS_AGP)) {
			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_TT;
		} else {
			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
						     TTM_PL_FLAG_TT;
		}
	}

	if (domain & RADEON_GEM_DOMAIN_CPU) {
		if (rbo->flags & RADEON_GEM_GTT_UC) {
			rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_SYSTEM;

		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
			   (rbo->rdev->flags & RADEON_IS_AGP)) {
			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_SYSTEM;
		} else {
			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
						     TTM_PL_FLAG_SYSTEM;
		}
	}
	if (!c)
		rbo->placements[c++].flags = TTM_PL_MASK_CACHING |
					     TTM_PL_FLAG_SYSTEM;

	rbo->placement.num_placement = c;
	rbo->placement.num_busy_placement = c;

	for (i = 0; i < c; ++i) {
		rbo->placements[i].fpfn = 0;
		if ((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
		    (rbo->placements[i].flags & TTM_PL_FLAG_VRAM))
			rbo->placements[i].lpfn =
				rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			rbo->placements[i].lpfn = 0;
	}

	/*
	 * Use two-ended allocation depending on the buffer size to
	 * improve fragmentation quality.
	 * 512KB was measured as the optimal number.
	 *
	 * Test the domain here rather than placements[i]: the loop
	 * index is already one past the last valid placement entry.
	 */
	if (!((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
	      (domain & RADEON_GEM_DOMAIN_VRAM)) &&
	    rbo->tbo.mem.size > 512 * 1024) {
		for (i = 0; i < c; i++) {
			rbo->placements[i].flags |= TTM_PL_FLAG_TOPDOWN;
		}
	}
}

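/*
 * Illustration (editor's note, not driver code): a BO created with
 * domain = RADEON_GEM_DOMAIN_VRAM | RADEON_GEM_DOMAIN_GTT and no
 * special flags on a PCIe board ends up with two placement entries:
 *
 *	placements[0].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
 *			      TTM_PL_FLAG_VRAM;
 *	placements[1].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
 *
 * If the BO is larger than 512KB and is not CPU-accessible VRAM, both
 * entries additionally get TTM_PL_FLAG_TOPDOWN so TTM allocates them
 * from the top of the address range.
 */
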
int radeon_bo_create(struct radeon_device *rdev,
		     unsigned long size, int byte_align, bool kernel, u32 domain,
		     u32 flags, struct sg_table *sg, struct radeon_bo **bo_ptr)
{
	struct radeon_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	size_t acc_size;
	int r;

	size = ALIGN(size, PAGE_SIZE);

	if (kernel) {
		type = ttm_bo_type_kernel;
	} else if (sg) {
		type = ttm_bo_type_sg;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
				       sizeof(struct radeon_bo));

	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->rdev = rdev;
	bo->surface_reg = -1;
	INIT_LIST_HEAD(&bo->list);
	INIT_LIST_HEAD(&bo->va);
	bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
				       RADEON_GEM_DOMAIN_GTT |
				       RADEON_GEM_DOMAIN_CPU);

	bo->flags = flags;
	/* PCI GART is always snooped */
	if (!(rdev->flags & RADEON_IS_PCIE))
		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);

	radeon_ttm_placement_from_domain(bo, domain);
	/* Kernel allocations are uninterruptible */
	down_read(&rdev->pm.mclk_lock);
	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, page_align, !kernel, NULL,
			acc_size, sg, &radeon_ttm_bo_destroy);
	up_read(&rdev->pm.mclk_lock);
	if (unlikely(r != 0)) {
		return r;
	}
	*bo_ptr = bo;

	trace_radeon_bo_create(bo);

	return 0;
}

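/*
 * Usage sketch (editor's illustration; this mirrors how in-kernel
 * callers such as the ring and GART code drive this API, with error
 * handling trimmed):
 *
 *	struct radeon_bo *bo;
 *	u64 gpu_addr;
 *	void *cpu_ptr;
 *
 *	radeon_bo_create(rdev, 4096, PAGE_SIZE, true,
 *			 RADEON_GEM_DOMAIN_VRAM, 0, NULL, &bo);
 *	radeon_bo_reserve(bo, false);
 *	radeon_bo_pin(bo, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
 *	radeon_bo_kmap(bo, &cpu_ptr);
 *	radeon_bo_unreserve(bo);
 */
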
int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		if (ptr) {
			*ptr = bo->kptr;
		}
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r) {
		return r;
	}
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr) {
		*ptr = bo->kptr;
	}
	radeon_bo_check_tiling(bo, 0, 0);
	return 0;
}

void radeon_bo_kunmap(struct radeon_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	radeon_bo_check_tiling(bo, 0, 0);
	ttm_bo_kunmap(&bo->kmap);
}

struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_reference(&bo->tbo);
	return bo;
}

void radeon_bo_unref(struct radeon_bo **bo)
{
	struct ttm_buffer_object *tbo;

	if ((*bo) == NULL)
		return;
	tbo = &((*bo)->tbo);
	ttm_bo_unref(&tbo);
	if (tbo == NULL)
		*bo = NULL;
}

int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
			     u64 *gpu_addr)
{
	int r, i;

	if (radeon_ttm_tt_has_userptr(bo->tbo.ttm))
		return -EPERM;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = radeon_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start;

			if (domain == RADEON_GEM_DOMAIN_VRAM)
				domain_start = bo->rdev->mc.vram_start;
			else
				domain_start = bo->rdev->mc.gtt_start;
			WARN_ON_ONCE(max_offset <
				     (radeon_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}
	radeon_ttm_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		/* force to pin into visible video ram */
		if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    (!max_offset || max_offset > bo->rdev->mc.visible_vram_size))
			bo->placements[i].lpfn =
				bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			bo->placements[i].lpfn = max_offset >> PAGE_SHIFT;

		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = radeon_bo_gpu_offset(bo);
		if (domain == RADEON_GEM_DOMAIN_VRAM)
			bo->rdev->vram_pin_size += radeon_bo_size(bo);
		else
			bo->rdev->gart_pin_size += radeon_bo_size(bo);
	} else {
		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
	}
	return r;
}

int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
	return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
}
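
/*
 * Example (editor's illustration): a scanout BO that must stay within
 * the first 256 MB of VRAM could be pinned with
 *
 *	r = radeon_bo_pin_restricted(bo, RADEON_GEM_DOMAIN_VRAM,
 *				     256ULL * 1024 * 1024, &gpu_addr);
 *
 * which caps each placement's lpfn accordingly before validation.
 */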

int radeon_bo_unpin(struct radeon_bo *bo)
{
	int r, i;

	if (!bo->pin_count) {
		dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++) {
		bo->placements[i].lpfn = 0;
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	}
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
			bo->rdev->vram_pin_size -= radeon_bo_size(bo);
		else
			bo->rdev->gart_pin_size -= radeon_bo_size(bo);
	} else {
		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
	}
	return r;
}

int radeon_bo_evict_vram(struct radeon_device *rdev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
	if (0 && (rdev->flags & RADEON_IS_IGP)) {
		if (rdev->mc.igp_sideport_enabled == false)
			/* Useless to evict on IGP chips */
			return 0;
	}
	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

void radeon_bo_force_delete(struct radeon_device *rdev)
{
	struct radeon_bo *bo, *n;

	if (list_empty(&rdev->gem.objects)) {
		return;
	}
	dev_err(rdev->dev, "Userspace still has active objects!\n");
	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
		mutex_lock(&rdev->ddev->struct_mutex);
		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
			*((unsigned long *)&bo->gem_base.refcount));
		mutex_lock(&bo->rdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&bo->rdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_unreference(&bo->gem_base);
		mutex_unlock(&rdev->ddev->struct_mutex);
	}
}

int radeon_bo_init(struct radeon_device *rdev)
{
	/* Add an MTRR for the VRAM */
	if (!rdev->fastfb_working) {
		rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
						      rdev->mc.aper_size);
	}
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		rdev->mc.mc_vram_size >> 20,
		(unsigned long long)rdev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %cDR\n",
		rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
	return radeon_ttm_init(rdev);
}

void radeon_bo_fini(struct radeon_device *rdev)
{
	radeon_ttm_fini(rdev);
	arch_phys_wc_del(rdev->mc.vram_mtrr);
}

/* Returns how many bytes TTM can move per IB.
 */
static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev)
{
	u64 real_vram_size = rdev->mc.real_vram_size;
	u64 vram_usage = atomic64_read(&rdev->vram_usage);

	/* This function is based on the current VRAM usage.
	 *
	 * - If all of VRAM is free, allow relocating the number of bytes that
	 *   is equal to 1/4 of the size of VRAM for this IB.
	 *
	 * - If more than one half of VRAM is occupied, only allow relocating
	 *   1 MB of data for this IB.
	 *
	 * - From 0 to one half of used VRAM, the threshold decreases
	 *   linearly.
	 *        __________________
	 * 1/4 of -|\               |
	 * VRAM    | \              |
	 *         |  \             |
	 *         |   \            |
	 *         |    \           |
	 *         |     \          |
	 *         |      \         |
	 *         |       \________|1 MB
	 *         |----------------|
	 *    VRAM 0 %             100 %
	 *         used             used
	 *
	 * Note: It's a threshold, not a limit. The threshold must be crossed
	 * for buffer relocations to stop, so any buffer of an arbitrary size
	 * can be moved as long as the threshold isn't crossed before
	 * the relocation takes place. We don't want to disable buffer
	 * relocations completely.
	 *
	 * The idea is that buffers should be placed in VRAM at creation time
	 * and TTM should only do a minimum number of relocations during
	 * command submission. In practice, you need to submit at least
	 * a dozen IBs to move all buffers to VRAM if they are in GTT.
	 *
	 * Also, things can get pretty crazy under memory pressure and actual
	 * VRAM usage can change a lot, so playing safe even at 50% does
	 * consistently increase performance.
	 */

	u64 half_vram = real_vram_size >> 1;
	u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
	u64 bytes_moved_threshold = half_free_vram >> 1;
	return max(bytes_moved_threshold, 1024*1024ull);
}

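/*
 * Worked example (editor's illustration of the formula above): with
 * 2048 MB of VRAM and 512 MB in use, half_vram = 1024 MB and
 * half_free_vram = 512 MB, so the threshold is 256 MB of moves per IB.
 * With all of VRAM free it is 512 MB (1/4 of VRAM); at 50% usage or
 * more it bottoms out at the 1 MB minimum.
 */
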
int radeon_bo_list_validate(struct radeon_device *rdev,
			    struct ww_acquire_ctx *ticket,
			    struct list_head *head, int ring)
{
	struct radeon_cs_reloc *lobj;
	struct radeon_bo *bo;
	int r;
	u64 bytes_moved = 0, initial_bytes_moved;
	u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);

	r = ttm_eu_reserve_buffers(ticket, head, true);
	if (unlikely(r != 0)) {
		return r;
	}

	list_for_each_entry(lobj, head, tv.head) {
		bo = lobj->robj;
		if (!bo->pin_count) {
			u32 domain = lobj->prefered_domains;
			u32 allowed = lobj->allowed_domains;
			u32 current_domain =
				radeon_mem_type_to_domain(bo->tbo.mem.mem_type);

			/* Check if this buffer will be moved and don't move it
			 * if we have moved too many buffers for this IB already.
			 *
			 * Note that this allows moving at least one buffer of
			 * any size, because it doesn't take the current "bo"
			 * into account. We don't want to disallow buffer moves
			 * completely.
			 */
			if ((allowed & current_domain) != 0 &&
			    (domain & current_domain) == 0 && /* will be moved */
			    bytes_moved > bytes_moved_threshold) {
				/* don't move it */
				domain = current_domain;
			}

		retry:
			radeon_ttm_placement_from_domain(bo, domain);
			if (ring == R600_RING_TYPE_UVD_INDEX)
				radeon_uvd_force_into_uvd_segment(bo, allowed);

			initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
			bytes_moved += atomic64_read(&rdev->num_bytes_moved) -
				       initial_bytes_moved;

			if (unlikely(r)) {
				if (r != -ERESTARTSYS &&
				    domain != lobj->allowed_domains) {
					domain = lobj->allowed_domains;
					goto retry;
				}
				ttm_eu_backoff_reservation(ticket, head);
				return r;
			}
		}
		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
		lobj->tiling_flags = bo->tiling_flags;
	}
	return 0;
}

int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
			 struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &bo->tbo);
}

int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;
	struct radeon_bo *old_object;
	int steal;
	int i;

	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (!bo->tiling_flags)
		return 0;

	if (bo->surface_reg >= 0) {
		reg = &rdev->surface_regs[bo->surface_reg];
		i = bo->surface_reg;
		goto out;
	}

	steal = -1;
	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {

		reg = &rdev->surface_regs[i];
		if (!reg->bo)
			break;

		old_object = reg->bo;
		if (old_object->pin_count == 0)
			steal = i;
	}

	/* if we are all out */
	if (i == RADEON_GEM_MAX_SURFACES) {
		if (steal == -1)
			return -ENOMEM;
		/* find someone with a surface reg and nuke their BO */
		reg = &rdev->surface_regs[steal];
		old_object = reg->bo;
		/* blow away the mapping */
		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
		ttm_bo_unmap_virtual(&old_object->tbo);
		old_object->surface_reg = -1;
		i = steal;
	}

	bo->surface_reg = i;
	reg->bo = bo;

out:
	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
			       bo->tbo.mem.start << PAGE_SHIFT,
			       bo->tbo.num_pages << PAGE_SHIFT);
	return 0;
}

static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;

	if (bo->surface_reg == -1)
		return;

	reg = &rdev->surface_regs[bo->surface_reg];
	radeon_clear_surface_reg(rdev, bo->surface_reg);

	reg->bo = NULL;
	bo->surface_reg = -1;
}

int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
				uint32_t tiling_flags, uint32_t pitch)
{
	struct radeon_device *rdev = bo->rdev;
	int r;

	if (rdev->family >= CHIP_CEDAR) {
		unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;

		bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
		bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
		mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
		tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
		stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
		switch (bankw) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (bankh) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (mtaspect) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		if (tilesplit > 6) {
			return -EINVAL;
		}
		if (stilesplit > 6) {
			return -EINVAL;
		}
	}
	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;
	bo->tiling_flags = tiling_flags;
	bo->pitch = pitch;
	radeon_bo_unreserve(bo);
	return 0;
}

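/*
 * Example (editor's illustration): a macro-tiled surface using a bank
 * width of 2 could encode its flags along the lines of
 *
 *	tiling_flags = RADEON_TILING_MACRO |
 *		       (2 << RADEON_TILING_EG_BANKW_SHIFT);
 *
 * The switch statements above only accept bank width/height and macro
 * tile aspect values of 0, 1, 2, 4 or 8.
 */
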
void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
				uint32_t *tiling_flags,
				uint32_t *pitch)
{
	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
	if (pitch)
		*pitch = bo->pitch;
}

int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
				bool force_drop)
{
	if (!force_drop)
		lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
		return 0;

	if (force_drop) {
		radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
		if (!has_moved)
			return 0;

		if (bo->surface_reg >= 0)
			radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if ((bo->surface_reg >= 0) && !has_moved)
		return 0;

	return radeon_bo_get_surface_reg(bo);
}

void radeon_bo_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *new_mem)
{
	struct radeon_bo *rbo;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return;

	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 1);
	radeon_vm_bo_invalidate(rbo->rdev, rbo);

	/* update statistics */
	if (!new_mem)
		return;

	radeon_update_memory_usage(rbo, bo->mem.mem_type, -1);
	radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
}

int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct radeon_device *rdev;
	struct radeon_bo *rbo;
	unsigned long offset, size;
	int r;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return 0;
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 0);
	rdev = rbo->rdev;
	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->mem.num_pages << PAGE_SHIFT;
	offset = bo->mem.start << PAGE_SHIFT;
	if ((offset + size) <= rdev->mc.visible_vram_size)
		return 0;

	/* hurrah, the memory is not visible! */
	radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
	rbo->placements[0].lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
	r = ttm_bo_validate(bo, &rbo->placement, false, false);
	if (unlikely(r == -ENOMEM)) {
		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
		return ttm_bo_validate(bo, &rbo->placement, false, false);
	} else if (unlikely(r != 0)) {
		return r;
	}

	offset = bo->mem.start << PAGE_SHIFT;
	/* this should never happen */
	if ((offset + size) > rdev->mc.visible_vram_size)
		return -EINVAL;

	return 0;
}

int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
	if (unlikely(r != 0))
		return r;
	if (mem_type)
		*mem_type = bo->tbo.mem.mem_type;

	r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
	ttm_bo_unreserve(&bo->tbo);
	return r;
}