drivers/gpu/drm/radeon/radeon_object.c

/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_trace.h"

int radeon_ttm_init(struct radeon_device *rdev);
void radeon_ttm_fini(struct radeon_device *rdev);
static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions call it.
 */

static void radeon_bo_clear_va(struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va, *tmp;

	list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) {
		/* remove from all vm address space */
		radeon_vm_bo_rmv(bo->rdev, bo_va);
	}
}

static void radeon_update_memory_usage(struct radeon_bo *bo,
				       unsigned mem_type, int sign)
{
	struct radeon_device *rdev = bo->rdev;
	u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;

	switch (mem_type) {
	case TTM_PL_TT:
		if (sign > 0)
			atomic64_add(size, &rdev->gtt_usage);
		else
			atomic64_sub(size, &rdev->gtt_usage);
		break;
	case TTM_PL_VRAM:
		if (sign > 0)
			atomic64_add(size, &rdev->vram_usage);
		else
			atomic64_sub(size, &rdev->vram_usage);
		break;
	}
}

static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct radeon_bo *bo;

	bo = container_of(tbo, struct radeon_bo, tbo);

	radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);

	mutex_lock(&bo->rdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&bo->rdev->gem.mutex);
	radeon_bo_clear_surface_reg(bo);
	radeon_bo_clear_va(bo);
	drm_gem_object_release(&bo->gem_base);
	kfree(bo);
}

bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &radeon_ttm_bo_destroy)
		return true;
	return false;
}

void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
	u32 c = 0;

	rbo->placement.fpfn = 0;
	rbo->placement.lpfn = 0;
	rbo->placement.placement = rbo->placements;
	rbo->placement.busy_placement = rbo->placements;
	if (domain & RADEON_GEM_DOMAIN_VRAM)
		rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
					TTM_PL_FLAG_VRAM;
	if (domain & RADEON_GEM_DOMAIN_GTT) {
		if (rbo->rdev->flags & RADEON_IS_AGP) {
			rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT;
		} else {
			rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
		}
	}
	if (domain & RADEON_GEM_DOMAIN_CPU) {
		if (rbo->rdev->flags & RADEON_IS_AGP) {
			rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM;
		} else {
			rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
		}
	}
	if (!c)
		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	rbo->placement.num_placement = c;
	rbo->placement.num_busy_placement = c;
}

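/*
 * Illustrative example (not from this file): on a non-AGP board, calling
 * radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM |
 * RADEON_GEM_DOMAIN_GTT) would fill in two placements, tried in order:
 *
 *	placements[0] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
 *	placements[1] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
 *
 * so TTM prefers VRAM and can fall back to cached GTT when VRAM is full.
 */
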
int radeon_bo_create(struct radeon_device *rdev,
		     unsigned long size, int byte_align, bool kernel, u32 domain,
		     struct sg_table *sg, struct radeon_bo **bo_ptr)
{
	struct radeon_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	size_t acc_size;
	int r;

	size = ALIGN(size, PAGE_SIZE);

	if (kernel) {
		type = ttm_bo_type_kernel;
	} else if (sg) {
		type = ttm_bo_type_sg;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
				       sizeof(struct radeon_bo));

	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->rdev = rdev;
	bo->surface_reg = -1;
	INIT_LIST_HEAD(&bo->list);
	INIT_LIST_HEAD(&bo->va);
	bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
				       RADEON_GEM_DOMAIN_GTT |
				       RADEON_GEM_DOMAIN_CPU);
	radeon_ttm_placement_from_domain(bo, domain);
	/* Kernel allocations are uninterruptible */
	down_read(&rdev->pm.mclk_lock);
	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, page_align, !kernel, NULL,
			acc_size, sg, &radeon_ttm_bo_destroy);
	up_read(&rdev->pm.mclk_lock);
	if (unlikely(r != 0)) {
		return r;
	}
	*bo_ptr = bo;

	trace_radeon_bo_create(bo);

	return 0;
}

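/*
 * A minimal usage sketch (illustrative, not part of this file): a typical
 * kernel-internal allocation pairs radeon_bo_create() with the reserve/pin/
 * kmap helpers from radeon_object.h. Error unwinding is elided; "rdev" and
 * "size" are assumed to exist in the caller.
 *
 *	struct radeon_bo *bo;
 *	u64 gpu_addr;
 *	void *cpu_ptr;
 *	int r;
 *
 *	r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
 *			     RADEON_GEM_DOMAIN_VRAM, NULL, &bo);
 *	if (r)
 *		return r;
 *	r = radeon_bo_reserve(bo, false);
 *	if (unlikely(r != 0))
 *		return r;
 *	r = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
 *	if (r == 0)
 *		r = radeon_bo_kmap(bo, &cpu_ptr);
 *	radeon_bo_unreserve(bo);
 */
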
int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		if (ptr) {
			*ptr = bo->kptr;
		}
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r) {
		return r;
	}
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr) {
		*ptr = bo->kptr;
	}
	radeon_bo_check_tiling(bo, 0, 0);
	return 0;
}

void radeon_bo_kunmap(struct radeon_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	radeon_bo_check_tiling(bo, 0, 0);
	ttm_bo_kunmap(&bo->kmap);
}

void radeon_bo_unref(struct radeon_bo **bo)
{
	struct ttm_buffer_object *tbo;
	struct radeon_device *rdev;

	if ((*bo) == NULL)
		return;
	rdev = (*bo)->rdev;
	tbo = &((*bo)->tbo);
	down_read(&rdev->pm.mclk_lock);
	ttm_bo_unref(&tbo);
	up_read(&rdev->pm.mclk_lock);
	if (tbo == NULL)
		*bo = NULL;
}

int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
			     u64 *gpu_addr)
{
	int r, i;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = radeon_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start;

			if (domain == RADEON_GEM_DOMAIN_VRAM)
				domain_start = bo->rdev->mc.vram_start;
			else
				domain_start = bo->rdev->mc.gtt_start;
			WARN_ON_ONCE(max_offset <
				     (radeon_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}
	radeon_ttm_placement_from_domain(bo, domain);
	if (domain == RADEON_GEM_DOMAIN_VRAM) {
		/* force to pin into visible video ram */
		bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
	}
	if (max_offset) {
		u64 lpfn = max_offset >> PAGE_SHIFT;

		if (!bo->placement.lpfn)
			bo->placement.lpfn = bo->rdev->mc.gtt_size >> PAGE_SHIFT;

		if (lpfn < bo->placement.lpfn)
			bo->placement.lpfn = lpfn;
	}
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = radeon_bo_gpu_offset(bo);
	}
	if (unlikely(r != 0))
		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
	return r;
}

int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
	return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
}

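/*
 * Illustrative example (not from this file): max_offset bounds where the
 * buffer may end up within the chosen domain, by clamping placement.lpfn.
 * E.g. a hardware block that could only address the first 256 MB of VRAM
 * might pin with:
 *
 *	r = radeon_bo_pin_restricted(bo, RADEON_GEM_DOMAIN_VRAM,
 *				     256ULL * 1024 * 1024, &gpu_addr);
 *
 * radeon_bo_pin() is simply the unrestricted (max_offset == 0) case.
 */
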
int radeon_bo_unpin(struct radeon_bo *bo)
{
	int r, i;

	if (!bo->pin_count) {
		dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (unlikely(r != 0))
		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
	return r;
}

int radeon_bo_evict_vram(struct radeon_device *rdev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
	if (0 && (rdev->flags & RADEON_IS_IGP)) {
		if (rdev->mc.igp_sideport_enabled == false)
			/* Useless to evict on IGP chips */
			return 0;
	}
	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

void radeon_bo_force_delete(struct radeon_device *rdev)
{
	struct radeon_bo *bo, *n;

	if (list_empty(&rdev->gem.objects)) {
		return;
	}
	dev_err(rdev->dev, "Userspace still has active objects!\n");
	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
		mutex_lock(&rdev->ddev->struct_mutex);
		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
			*((unsigned long *)&bo->gem_base.refcount));
		mutex_lock(&bo->rdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&bo->rdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_unreference(&bo->gem_base);
		mutex_unlock(&rdev->ddev->struct_mutex);
	}
}

int radeon_bo_init(struct radeon_device *rdev)
{
	/* Add an MTRR for the VRAM */
	if (!rdev->fastfb_working) {
		rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
						      rdev->mc.aper_size);
	}
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		 rdev->mc.mc_vram_size >> 20,
		 (unsigned long long)rdev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %cDR\n",
		 rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
	return radeon_ttm_init(rdev);
}

void radeon_bo_fini(struct radeon_device *rdev)
{
	radeon_ttm_fini(rdev);
	arch_phys_wc_del(rdev->mc.vram_mtrr);
}

/* Returns how many bytes TTM can move per IB. */
static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev)
{
	u64 real_vram_size = rdev->mc.real_vram_size;
	u64 vram_usage = atomic64_read(&rdev->vram_usage);

	/* This function is based on the current VRAM usage.
	 *
	 * - If all of VRAM is free, allow relocating the number of bytes that
	 *   is equal to 1/4 of the size of VRAM for this IB.
	 *
	 * - If more than one half of VRAM is occupied, only allow relocating
	 *   1 MB of data for this IB.
	 *
	 * - From 0 to one half of used VRAM, the threshold decreases
	 *   linearly.
	 *        __________________
	 * 1/4 of -|\               |
	 * VRAM    | \              |
	 *         |  \             |
	 *         |   \            |
	 *         |    \           |
	 *         |     \          |
	 *         |      \         |
	 *         |       \________|1 MB
	 *         |----------------|
	 *    VRAM 0 %             100 %
	 *         used            used
	 *
	 * Note: It's a threshold, not a limit. The threshold must be crossed
	 * for buffer relocations to stop, so any buffer of an arbitrary size
	 * can be moved as long as the threshold isn't crossed before
	 * the relocation takes place. We don't want to disable buffer
	 * relocations completely.
	 *
	 * The idea is that buffers should be placed in VRAM at creation time
	 * and TTM should only do a minimum number of relocations during
	 * command submission. In practice, you need to submit at least
	 * a dozen IBs to move all buffers to VRAM if they are in GTT.
	 *
	 * Also, things can get pretty crazy under memory pressure and actual
	 * VRAM usage can change a lot, so playing safe even at 50% does
	 * consistently increase performance.
	 */

	u64 half_vram = real_vram_size >> 1;
	u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
	u64 bytes_moved_threshold = half_free_vram >> 1;
	return max(bytes_moved_threshold, 1024*1024ull);
}

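/*
 * Worked example (illustrative numbers): with 1024 MB of real VRAM and
 * 256 MB in use, half_vram = 512 MB and half_free_vram = 256 MB, so the
 * threshold is 128 MB of moves per IB. Once usage reaches 512 MB or more,
 * half_free_vram drops to 0 and the 1 MB floor from max() takes over.
 */
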
int radeon_bo_list_validate(struct radeon_device *rdev,
			    struct ww_acquire_ctx *ticket,
			    struct list_head *head, int ring)
{
	struct radeon_cs_reloc *lobj;
	struct radeon_bo *bo;
	int r;
	u64 bytes_moved = 0, initial_bytes_moved;
	u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);

	r = ttm_eu_reserve_buffers(ticket, head);
	if (unlikely(r != 0)) {
		return r;
	}

	list_for_each_entry(lobj, head, tv.head) {
		bo = lobj->robj;
		if (!bo->pin_count) {
			u32 domain = lobj->domain;
			u32 current_domain =
				radeon_mem_type_to_domain(bo->tbo.mem.mem_type);

			/* Check if this buffer will be moved and don't move it
			 * if we have moved too many buffers for this IB already.
			 *
			 * Note that this allows moving at least one buffer of
			 * any size, because it doesn't take the current "bo"
			 * into account. We don't want to disallow buffer moves
			 * completely.
			 */
			if (current_domain != RADEON_GEM_DOMAIN_CPU &&
			    (domain & current_domain) == 0 && /* will be moved */
			    bytes_moved > bytes_moved_threshold) {
				/* don't move it */
				domain = current_domain;
			}

		retry:
			radeon_ttm_placement_from_domain(bo, domain);
			if (ring == R600_RING_TYPE_UVD_INDEX)
				radeon_uvd_force_into_uvd_segment(bo);

			initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
			bytes_moved += atomic64_read(&rdev->num_bytes_moved) -
				       initial_bytes_moved;

			if (unlikely(r)) {
				if (r != -ERESTARTSYS && domain != lobj->alt_domain) {
					domain = lobj->alt_domain;
					goto retry;
				}
				ttm_eu_backoff_reservation(ticket, head);
				return r;
			}
		}
		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
		lobj->tiling_flags = bo->tiling_flags;
	}
	return 0;
}

int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
			 struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &bo->tbo);
}

int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;
	struct radeon_bo *old_object;
	int steal;
	int i;

	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (!bo->tiling_flags)
		return 0;

	if (bo->surface_reg >= 0) {
		reg = &rdev->surface_regs[bo->surface_reg];
		i = bo->surface_reg;
		goto out;
	}

	steal = -1;
	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {

		reg = &rdev->surface_regs[i];
		if (!reg->bo)
			break;

		old_object = reg->bo;
		if (old_object->pin_count == 0)
			steal = i;
	}

	/* if we are all out */
	if (i == RADEON_GEM_MAX_SURFACES) {
		if (steal == -1)
			return -ENOMEM;
		/* find someone with a surface reg and nuke their BO */
		reg = &rdev->surface_regs[steal];
		old_object = reg->bo;
		/* blow away the mapping */
		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
		ttm_bo_unmap_virtual(&old_object->tbo);
		old_object->surface_reg = -1;
		i = steal;
	}

	bo->surface_reg = i;
	reg->bo = bo;

out:
	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
			       bo->tbo.mem.start << PAGE_SHIFT,
			       bo->tbo.num_pages << PAGE_SHIFT);
	return 0;
}

static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;

	if (bo->surface_reg == -1)
		return;

	reg = &rdev->surface_regs[bo->surface_reg];
	radeon_clear_surface_reg(rdev, bo->surface_reg);

	reg->bo = NULL;
	bo->surface_reg = -1;
}

int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
			       uint32_t tiling_flags, uint32_t pitch)
{
	struct radeon_device *rdev = bo->rdev;
	int r;

	if (rdev->family >= CHIP_CEDAR) {
		unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;

		bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
		bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
		mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
		tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
		stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
		switch (bankw) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (bankh) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (mtaspect) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		if (tilesplit > 6) {
			return -EINVAL;
		}
		if (stilesplit > 6) {
			return -EINVAL;
		}
	}
	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;
	bo->tiling_flags = tiling_flags;
	bo->pitch = pitch;
	radeon_bo_unreserve(bo);
	return 0;
}

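/*
 * Hedged sketch of how a caller might pack the evergreen tiling fields that
 * the checks above validate. The RADEON_TILING_* flag and shift macros come
 * from radeon_drm.h; the field values here are only an example and must fall
 * within the ranges enforced by radeon_bo_set_tiling_flags():
 *
 *	u32 flags = RADEON_TILING_MACRO |
 *		    (2 << RADEON_TILING_EG_BANKW_SHIFT) |
 *		    (2 << RADEON_TILING_EG_BANKH_SHIFT) |
 *		    (1 << RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) |
 *		    (4 << RADEON_TILING_EG_TILE_SPLIT_SHIFT);
 *	r = radeon_bo_set_tiling_flags(bo, flags, pitch);
 */
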
void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
				uint32_t *tiling_flags,
				uint32_t *pitch)
{
	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
	if (pitch)
		*pitch = bo->pitch;
}

int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
			   bool force_drop)
{
	if (!force_drop)
		lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
		return 0;

	if (force_drop) {
		radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
		if (!has_moved)
			return 0;

		if (bo->surface_reg >= 0)
			radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if ((bo->surface_reg >= 0) && !has_moved)
		return 0;

	return radeon_bo_get_surface_reg(bo);
}

void radeon_bo_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *new_mem)
{
	struct radeon_bo *rbo;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return;

	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 1);
	radeon_vm_bo_invalidate(rbo->rdev, rbo);

	/* update statistics */
	if (!new_mem)
		return;

	radeon_update_memory_usage(rbo, bo->mem.mem_type, -1);
	radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
}

int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct radeon_device *rdev;
	struct radeon_bo *rbo;
	unsigned long offset, size;
	int r;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return 0;
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 0);
	rdev = rbo->rdev;
	if (bo->mem.mem_type == TTM_PL_VRAM) {
		size = bo->mem.num_pages << PAGE_SHIFT;
		offset = bo->mem.start << PAGE_SHIFT;
		if ((offset + size) > rdev->mc.visible_vram_size) {
			/* hurrah, the memory is not visible! */
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
			rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
			r = ttm_bo_validate(bo, &rbo->placement, false, false);
			if (unlikely(r != 0))
				return r;
			offset = bo->mem.start << PAGE_SHIFT;
			/* this should not happen */
			if ((offset + size) > rdev->mc.visible_vram_size)
				return -EINVAL;
		}
	}
	return 0;
}

int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
	if (unlikely(r != 0))
		return r;
	spin_lock(&bo->tbo.bdev->fence_lock);
	if (mem_type)
		*mem_type = bo->tbo.mem.mem_type;
	if (bo->tbo.sync_obj)
		r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
	spin_unlock(&bo->tbo.bdev->fence_lock);
	ttm_bo_unreserve(&bo->tbo);
	return r;
}