/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_trace.h"

int radeon_ttm_init(struct radeon_device *rdev);
void radeon_ttm_fini(struct radeon_device *rdev);
static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions call it.
 */

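/**
 * radeon_update_memory_usage - track per-domain memory usage
 * @bo: buffer object whose placement changed
 * @mem_type: TTM memory type the BO occupies (TTM_PL_TT or TTM_PL_VRAM)
 * @sign: +1 when the BO enters the domain, -1 when it leaves
 *
 * Keeps the atomic GTT/VRAM usage counters up to date; these counters
 * feed the relocation threshold computed further down in this file.
 */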
static void radeon_update_memory_usage(struct radeon_bo *bo,
				       unsigned mem_type, int sign)
{
	struct radeon_device *rdev = bo->rdev;
	u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;

	switch (mem_type) {
	case TTM_PL_TT:
		if (sign > 0)
			atomic64_add(size, &rdev->gtt_usage);
		else
			atomic64_sub(size, &rdev->gtt_usage);
		break;
	case TTM_PL_VRAM:
		if (sign > 0)
			atomic64_add(size, &rdev->vram_usage);
		else
			atomic64_sub(size, &rdev->vram_usage);
		break;
	}
}

static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct radeon_bo *bo;

	bo = container_of(tbo, struct radeon_bo, tbo);

	radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);
	radeon_mn_unregister(bo);

	mutex_lock(&bo->rdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&bo->rdev->gem.mutex);
	radeon_bo_clear_surface_reg(bo);
	WARN_ON(!list_empty(&bo->va));
	drm_gem_object_release(&bo->gem_base);
	kfree(bo);
}

bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &radeon_ttm_bo_destroy)
		return true;
	return false;
}

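/**
 * radeon_ttm_placement_from_domain - build a TTM placement list from a domain mask
 * @rbo: buffer object to set up
 * @domain: mask of RADEON_GEM_DOMAIN_{VRAM,GTT,CPU} bits
 *
 * Translates the requested domains into an ordered array of TTM
 * placements, honoring the BO's caching flags (RADEON_GEM_GTT_UC/WC)
 * and CPU access hints. Falls back to cached system memory when no
 * domain bit is set.
 */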
void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
	u32 c = 0, i;

	rbo->placement.placement = rbo->placements;
	rbo->placement.busy_placement = rbo->placements;
	if (domain & RADEON_GEM_DOMAIN_VRAM) {
		/* Try placing BOs which don't need CPU access outside of the
		 * CPU accessible part of VRAM
		 */
		if ((rbo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
		    rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size) {
			rbo->placements[c].fpfn =
				rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
						     TTM_PL_FLAG_UNCACHED |
						     TTM_PL_FLAG_VRAM;
		}

		rbo->placements[c].fpfn = 0;
		rbo->placements[c++].flags = TTM_PL_FLAG_WC |
					     TTM_PL_FLAG_UNCACHED |
					     TTM_PL_FLAG_VRAM;
	}

	if (domain & RADEON_GEM_DOMAIN_GTT) {
		if (rbo->flags & RADEON_GEM_GTT_UC) {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
						     TTM_PL_FLAG_TT;
		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
			   (rbo->rdev->flags & RADEON_IS_AGP)) {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
						     TTM_PL_FLAG_UNCACHED |
						     TTM_PL_FLAG_TT;
		} else {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
						     TTM_PL_FLAG_TT;
		}
	}

	if (domain & RADEON_GEM_DOMAIN_CPU) {
		if (rbo->flags & RADEON_GEM_GTT_UC) {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
						     TTM_PL_FLAG_SYSTEM;
		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
			   (rbo->rdev->flags & RADEON_IS_AGP)) {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
						     TTM_PL_FLAG_UNCACHED |
						     TTM_PL_FLAG_SYSTEM;
		} else {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
						     TTM_PL_FLAG_SYSTEM;
		}
	}
	if (!c) {
		rbo->placements[c].fpfn = 0;
		rbo->placements[c++].flags = TTM_PL_MASK_CACHING |
					     TTM_PL_FLAG_SYSTEM;
	}

	rbo->placement.num_placement = c;
	rbo->placement.num_busy_placement = c;

	for (i = 0; i < c; ++i) {
		if ((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
		    (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    !rbo->placements[i].fpfn)
			rbo->placements[i].lpfn =
				rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			rbo->placements[i].lpfn = 0;
	}

	/*
	 * Use two-ended allocation depending on the buffer size to
	 * improve fragmentation quality. 512KB was measured as the
	 * optimal cutoff.
	 */
	if (rbo->tbo.mem.size > 512 * 1024) {
		for (i = 0; i < c; i++) {
			rbo->placements[i].flags |= TTM_PL_FLAG_TOPDOWN;
		}
	}
}

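/**
 * radeon_bo_create - allocate and initialize a radeon buffer object
 * @rdev: radeon device
 * @size: size in bytes, rounded up to a multiple of the page size
 * @byte_align: minimum alignment of the backing storage in bytes
 * @kernel: true for an uninterruptible kernel-internal allocation
 * @domain: initial RADEON_GEM_DOMAIN_* placement mask
 * @flags: RADEON_GEM_* caching and access flags
 * @sg: optional scatter/gather table for imported buffers
 * @resv: optional reservation object to share
 * @bo_ptr: return location for the new BO, left NULL on failure
 */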
int radeon_bo_create(struct radeon_device *rdev,
		     unsigned long size, int byte_align, bool kernel,
		     u32 domain, u32 flags, struct sg_table *sg,
		     struct reservation_object *resv,
		     struct radeon_bo **bo_ptr)
{
	struct radeon_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	size_t acc_size;
	int r;

	size = ALIGN(size, PAGE_SIZE);

	if (kernel) {
		type = ttm_bo_type_kernel;
	} else if (sg) {
		type = ttm_bo_type_sg;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
				       sizeof(struct radeon_bo));

	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->rdev = rdev;
	bo->surface_reg = -1;
	INIT_LIST_HEAD(&bo->list);
	INIT_LIST_HEAD(&bo->va);
	bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
				       RADEON_GEM_DOMAIN_GTT |
				       RADEON_GEM_DOMAIN_CPU);

	bo->flags = flags;
	/* PCI GART is always snooped */
	if (!(rdev->flags & RADEON_IS_PCIE))
		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);

	radeon_ttm_placement_from_domain(bo, domain);
	/* Kernel allocations are uninterruptible */
	down_read(&rdev->pm.mclk_lock);
	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, page_align, !kernel, NULL,
			acc_size, sg, resv, &radeon_ttm_bo_destroy);
	up_read(&rdev->pm.mclk_lock);
	if (unlikely(r != 0)) {
		return r;
	}
	*bo_ptr = bo;

	trace_radeon_bo_create(bo);

	return 0;
}

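/*
 * Typical caller pattern (editorial sketch, not code from this driver):
 * a kernel BO is created once, then reserved before being pinned and
 * mapped for CPU access:
 *
 *	struct radeon_bo *bo;
 *	void *cpu_ptr;
 *	u64 gpu_addr;
 *	int r;
 *
 *	r = radeon_bo_create(rdev, 4096, PAGE_SIZE, true,
 *			     RADEON_GEM_DOMAIN_VRAM, 0, NULL, NULL, &bo);
 *	if (r)
 *		return r;
 *	r = radeon_bo_reserve(bo, false);
 *	if (r == 0) {
 *		r = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
 *		if (r == 0)
 *			r = radeon_bo_kmap(bo, &cpu_ptr);
 *		radeon_bo_unreserve(bo);
 *	}
 */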
int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		if (ptr) {
			*ptr = bo->kptr;
		}
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r) {
		return r;
	}
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr) {
		*ptr = bo->kptr;
	}
	radeon_bo_check_tiling(bo, 0, 0);
	return 0;
}

void radeon_bo_kunmap(struct radeon_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	radeon_bo_check_tiling(bo, 0, 0);
	ttm_bo_kunmap(&bo->kmap);
}

struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_reference(&bo->tbo);
	return bo;
}

void radeon_bo_unref(struct radeon_bo **bo)
{
	struct ttm_buffer_object *tbo;
	struct radeon_device *rdev;

	if ((*bo) == NULL)
		return;
	rdev = (*bo)->rdev;
	tbo = &((*bo)->tbo);
	ttm_bo_unref(&tbo);
	if (tbo == NULL)
		*bo = NULL;
}

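/**
 * radeon_bo_pin_restricted - pin a BO into a domain below a maximum offset
 * @bo: buffer object to pin
 * @domain: RADEON_GEM_DOMAIN_* to pin into
 * @max_offset: highest acceptable offset within the domain, 0 for no limit
 * @gpu_addr: optional return location for the resulting GPU address
 *
 * Pinning is reference counted: an already pinned BO only gets its
 * pin_count bumped. Userptr BOs can never be pinned and yield -EPERM.
 */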
int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
			     u64 *gpu_addr)
{
	int r, i;

	if (radeon_ttm_tt_has_userptr(bo->tbo.ttm))
		return -EPERM;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = radeon_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start;

			if (domain == RADEON_GEM_DOMAIN_VRAM)
				domain_start = bo->rdev->mc.vram_start;
			else
				domain_start = bo->rdev->mc.gtt_start;
			WARN_ON_ONCE(max_offset <
				     (radeon_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}
	radeon_ttm_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		/* force to pin into visible video ram */
		if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    !(bo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
		    (!max_offset || max_offset > bo->rdev->mc.visible_vram_size))
			bo->placements[i].lpfn =
				bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			bo->placements[i].lpfn = max_offset >> PAGE_SHIFT;

		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = radeon_bo_gpu_offset(bo);
		if (domain == RADEON_GEM_DOMAIN_VRAM)
			bo->rdev->vram_pin_size += radeon_bo_size(bo);
		else
			bo->rdev->gart_pin_size += radeon_bo_size(bo);
	} else {
		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
	}
	return r;
}

int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
	return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
}

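/**
 * radeon_bo_unpin - drop one pin reference from a BO
 * @bo: buffer object to unpin
 *
 * Only when pin_count drops to zero is TTM_PL_FLAG_NO_EVICT cleared
 * and the BO revalidated so TTM may evict it again.
 */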
int radeon_bo_unpin(struct radeon_bo *bo)
{
	int r, i;

	if (!bo->pin_count) {
		dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++) {
		bo->placements[i].lpfn = 0;
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	}
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
			bo->rdev->vram_pin_size -= radeon_bo_size(bo);
		else
			bo->rdev->gart_pin_size -= radeon_bo_size(bo);
	} else {
		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
	}
	return r;
}

int radeon_bo_evict_vram(struct radeon_device *rdev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
	if (0 && (rdev->flags & RADEON_IS_IGP)) {
		if (rdev->mc.igp_sideport_enabled == false)
			/* Useless to evict on IGP chips */
			return 0;
	}
	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

void radeon_bo_force_delete(struct radeon_device *rdev)
{
	struct radeon_bo *bo, *n;

	if (list_empty(&rdev->gem.objects)) {
		return;
	}
	dev_err(rdev->dev, "Userspace still has active objects!\n");
	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
		mutex_lock(&rdev->ddev->struct_mutex);
		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
			*((unsigned long *)&bo->gem_base.refcount));
		mutex_lock(&bo->rdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&bo->rdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_unreference(&bo->gem_base);
		mutex_unlock(&rdev->ddev->struct_mutex);
	}
}

int radeon_bo_init(struct radeon_device *rdev)
{
	/* Add an MTRR for the VRAM */
	if (!rdev->fastfb_working) {
		rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
						      rdev->mc.aper_size);
	}
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 rdev->mc.mc_vram_size >> 20,
		 (unsigned long long)rdev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %cDR\n",
		 rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
	return radeon_ttm_init(rdev);
}

void radeon_bo_fini(struct radeon_device *rdev)
{
	radeon_ttm_fini(rdev);
	arch_phys_wc_del(rdev->mc.vram_mtrr);
}

/* Returns how many bytes TTM can move per IB. */
static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev)
{
	u64 real_vram_size = rdev->mc.real_vram_size;
	u64 vram_usage = atomic64_read(&rdev->vram_usage);

	/* This function is based on the current VRAM usage.
	 *
	 * - If all of VRAM is free, allow relocating the number of bytes that
	 *   is equal to 1/4 of the size of VRAM for this IB.
	 *
	 * - If more than one half of VRAM is occupied, only allow relocating
	 *   1 MB of data for this IB.
	 *
	 * - From 0 to one half of used VRAM, the threshold decreases
	 *   linearly.
	 *         __________________
	 * 1/4 of -|\               |
	 * VRAM    | \              |
	 *         |  \             |
	 *         |   \            |
	 *         |    \           |
	 *         |     \          |
	 *         |      \         |
	 *         |       \________|1 MB
	 *         |----------------|
	 *    VRAM 0 %             100 %
	 *         used            used
	 *
	 * Note: It's a threshold, not a limit. The threshold must be crossed
	 * for buffer relocations to stop, so any buffer of an arbitrary size
	 * can be moved as long as the threshold isn't crossed before
	 * the relocation takes place. We don't want to disable buffer
	 * relocations completely.
	 *
	 * The idea is that buffers should be placed in VRAM at creation time
	 * and TTM should only do a minimum number of relocations during
	 * command submission. In practice, you need to submit at least
	 * a dozen IBs to move all buffers to VRAM if they are in GTT.
	 *
	 * Also, things can get pretty crazy under memory pressure and actual
	 * VRAM usage can change a lot, so playing safe even at 50% does
	 * consistently increase performance.
	 */

	u64 half_vram = real_vram_size >> 1;
	u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
	u64 bytes_moved_threshold = half_free_vram >> 1;
	return max(bytes_moved_threshold, 1024*1024ull);
}

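/*
 * Worked example for the threshold above (illustrative numbers only):
 * with 2 GiB of real VRAM and 1 GiB already in use, half_free_vram is 0
 * and the threshold clamps to the 1 MB floor; with only 256 MiB in use,
 * half_free_vram is 768 MiB, giving a 384 MiB threshold for this IB.
 */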
int radeon_bo_list_validate(struct radeon_device *rdev,
			    struct ww_acquire_ctx *ticket,
			    struct list_head *head, int ring)
{
	struct radeon_bo_list *lobj;
	struct list_head duplicates;
	int r;
	u64 bytes_moved = 0, initial_bytes_moved;
	u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);

	INIT_LIST_HEAD(&duplicates);
	r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates);
	if (unlikely(r != 0)) {
		return r;
	}

	list_for_each_entry(lobj, head, tv.head) {
		struct radeon_bo *bo = lobj->robj;
		if (!bo->pin_count) {
			u32 domain = lobj->prefered_domains;
			u32 allowed = lobj->allowed_domains;
			u32 current_domain =
				radeon_mem_type_to_domain(bo->tbo.mem.mem_type);

			WARN_ONCE(bo->gem_base.dumb,
				  "GPU use of dumb buffer is illegal.\n");

			/* Check if this buffer will be moved and don't move it
			 * if we have moved too many buffers for this IB already.
			 *
			 * Note that this allows moving at least one buffer of
			 * any size, because it doesn't take the current "bo"
			 * into account. We don't want to disallow buffer moves
			 * completely.
			 */
			if ((allowed & current_domain) != 0 &&
			    (domain & current_domain) == 0 && /* will be moved */
			    bytes_moved > bytes_moved_threshold) {
				/* don't move it */
				domain = current_domain;
			}

		retry:
			radeon_ttm_placement_from_domain(bo, domain);
			if (ring == R600_RING_TYPE_UVD_INDEX)
				radeon_uvd_force_into_uvd_segment(bo, allowed);

			initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
			bytes_moved += atomic64_read(&rdev->num_bytes_moved) -
				       initial_bytes_moved;

			if (unlikely(r)) {
				if (r != -ERESTARTSYS &&
				    domain != lobj->allowed_domains) {
					domain = lobj->allowed_domains;
					goto retry;
				}
				ttm_eu_backoff_reservation(ticket, head);
				return r;
			}
		}
		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
		lobj->tiling_flags = bo->tiling_flags;
	}

	list_for_each_entry(lobj, &duplicates, tv.head) {
		lobj->gpu_offset = radeon_bo_gpu_offset(lobj->robj);
		lobj->tiling_flags = lobj->robj->tiling_flags;
	}

	return 0;
}

int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
			 struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &bo->tbo);
}

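/**
 * radeon_bo_get_surface_reg - bind a tiled BO to a hardware surface register
 * @bo: reserved buffer object with tiling flags set
 *
 * Picks a free slot among the RADEON_GEM_MAX_SURFACES surface registers,
 * stealing one from an unpinned BO if they are all taken, and programs
 * it with the BO's tiling parameters and VRAM range.
 */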
int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;
	struct radeon_bo *old_object;
	int steal;
	int i;

	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (!bo->tiling_flags)
		return 0;

	if (bo->surface_reg >= 0) {
		reg = &rdev->surface_regs[bo->surface_reg];
		i = bo->surface_reg;
		goto out;
	}

	steal = -1;
	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
		reg = &rdev->surface_regs[i];
		if (!reg->bo)
			break;

		old_object = reg->bo;
		if (old_object->pin_count == 0)
			steal = i;
	}

	/* if we are all out */
	if (i == RADEON_GEM_MAX_SURFACES) {
		if (steal == -1)
			return -ENOMEM;
		/* find someone with a surface reg and nuke their BO */
		reg = &rdev->surface_regs[steal];
		old_object = reg->bo;
		/* blow away the mapping */
		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
		ttm_bo_unmap_virtual(&old_object->tbo);
		old_object->surface_reg = -1;
		i = steal;
	}

	bo->surface_reg = i;
	reg->bo = bo;

out:
	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
			       bo->tbo.mem.start << PAGE_SHIFT,
			       bo->tbo.num_pages << PAGE_SHIFT);
	return 0;
}

static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;

	if (bo->surface_reg == -1)
		return;

	reg = &rdev->surface_regs[bo->surface_reg];
	radeon_clear_surface_reg(rdev, bo->surface_reg);

	reg->bo = NULL;
	bo->surface_reg = -1;
}

int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
			       uint32_t tiling_flags, uint32_t pitch)
{
	struct radeon_device *rdev = bo->rdev;
	int r;

	if (rdev->family >= CHIP_CEDAR) {
		unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;

		bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
		bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
		mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
		tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
		stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
		switch (bankw) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (bankh) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (mtaspect) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		if (tilesplit > 6) {
			return -EINVAL;
		}
		if (stilesplit > 6) {
			return -EINVAL;
		}
	}
	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;
	bo->tiling_flags = tiling_flags;
	bo->pitch = pitch;
	radeon_bo_unreserve(bo);
	return 0;
}

void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
				uint32_t *tiling_flags,
				uint32_t *pitch)
{
	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
	if (pitch)
		*pitch = bo->pitch;
}

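/**
 * radeon_bo_check_tiling - reconcile a BO's surface register with its placement
 * @bo: buffer object to check
 * @has_moved: true if the BO just changed memory type
 * @force_drop: true to unconditionally release the surface register
 *
 * Surface registers only apply to BOs resident in VRAM, so the register
 * is dropped when a tiled BO leaves VRAM and (re)acquired when it moves
 * back in.
 */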
int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
			   bool force_drop)
{
	if (!force_drop)
		lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
		return 0;

	if (force_drop) {
		radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
		if (!has_moved)
			return 0;

		if (bo->surface_reg >= 0)
			radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if ((bo->surface_reg >= 0) && !has_moved)
		return 0;

	return radeon_bo_get_surface_reg(bo);
}

void radeon_bo_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *new_mem)
{
	struct radeon_bo *rbo;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return;

	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 1);
	radeon_vm_bo_invalidate(rbo->rdev, rbo);

	/* update statistics */
	if (!new_mem)
		return;

	radeon_update_memory_usage(rbo, bo->mem.mem_type, -1);
	radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
}

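/**
 * radeon_bo_fault_reserve_notify - handle a CPU fault on unmappable VRAM
 * @bo: TTM buffer object being faulted in, already reserved
 *
 * If the faulting BO lies beyond the CPU-visible part of VRAM, try to
 * revalidate it into the visible aperture, falling back to GTT when
 * visible VRAM is exhausted.
 */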
int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct radeon_device *rdev;
	struct radeon_bo *rbo;
	unsigned long offset, size, lpfn;
	int i, r;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return 0;
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 0);
	rdev = rbo->rdev;
	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->mem.num_pages << PAGE_SHIFT;
	offset = bo->mem.start << PAGE_SHIFT;
	if ((offset + size) <= rdev->mc.visible_vram_size)
		return 0;

	/* hurrah, the memory is not visible! */
	radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
	lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
	for (i = 0; i < rbo->placement.num_placement; i++) {
		/* Force into visible VRAM */
		if ((rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    (!rbo->placements[i].lpfn || rbo->placements[i].lpfn > lpfn))
			rbo->placements[i].lpfn = lpfn;
	}
	r = ttm_bo_validate(bo, &rbo->placement, false, false);
	if (unlikely(r == -ENOMEM)) {
		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
		return ttm_bo_validate(bo, &rbo->placement, false, false);
	} else if (unlikely(r != 0)) {
		return r;
	}

	offset = bo->mem.start << PAGE_SHIFT;
	/* this should never happen */
	if ((offset + size) > rdev->mc.visible_vram_size)
		return -EINVAL;

	return 0;
}

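/**
 * radeon_bo_wait - reserve a BO and wait for its pending GPU work
 * @bo: buffer object to wait for
 * @mem_type: optional return location for the current TTM memory type
 * @no_wait: if true, fail with -EBUSY instead of blocking
 */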
int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
	if (unlikely(r != 0))
		return r;
	if (mem_type)
		*mem_type = bo->tbo.mem.mem_type;

	r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
	ttm_bo_unreserve(&bo->tbo);
	return r;
}

/**
 * radeon_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 */
void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
		     bool shared)
{
	struct reservation_object *resv = bo->tbo.resv;

	if (shared)
		reservation_object_add_shared_fence(resv, &fence->base);
	else
		reservation_object_add_excl_fence(resv, &fence->base);
}