/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */

#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_page_alloc.h>
#include <ttm/ttm_module.h>
#include <drm/drmP.h>
#include <drm/drm.h>
#include <drm/qxl_drm.h>
#include "qxl_drv.h"
#include "qxl_object.h"

#include <linux/delay.h>

static int qxl_ttm_debugfs_init(struct qxl_device *qdev);

static struct qxl_device *qxl_get_qdev(struct ttm_bo_device *bdev)
{
	struct qxl_mman *mman;
	struct qxl_device *qdev;

	mman = container_of(bdev, struct qxl_mman, bdev);
	qdev = container_of(mman, struct qxl_device, mman);
	return qdev;
}

static int qxl_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void qxl_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

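/*
 * TTM keeps its memory-accounting and BO state in driver-shared globals;
 * drm_global_item_ref() creates each item on first use and refcounts it
 * afterwards, so the ref/unref calls below must stay balanced.
 */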
static int qxl_ttm_global_init(struct qxl_device *qdev)
{
	struct drm_global_reference *global_ref;
	int r;

	qdev->mman.mem_global_referenced = false;
	global_ref = &qdev->mman.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &qxl_ttm_mem_global_init;
	global_ref->release = &qxl_ttm_mem_global_release;

	r = drm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM memory accounting subsystem.\n");
		return r;
	}

	qdev->mman.bo_global_ref.mem_glob =
		qdev->mman.mem_global_ref.object;
	global_ref = &qdev->mman.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;
	r = drm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
		drm_global_item_unref(&qdev->mman.mem_global_ref);
		return r;
	}

	qdev->mman.mem_global_referenced = true;
	return 0;
}

static void qxl_ttm_global_fini(struct qxl_device *qdev)
{
	if (qdev->mman.mem_global_referenced) {
		drm_global_item_unref(&qdev->mman.bo_global_ref.ref);
		drm_global_item_unref(&qdev->mman.mem_global_ref);
		qdev->mman.mem_global_referenced = false;
	}
}

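/*
 * qxl interposes on TTM's page-fault handling: the first mmap() caches
 * TTM's vm_operations_struct in ttm_vm_ops, and every qxl VMA gets a
 * copy whose ->fault hook looks up the buffer object and qxl_device
 * before delegating to TTM's handler (see qxl_mmap() below).
 */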
static struct vm_operations_struct qxl_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops;

static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo;
	struct qxl_device *qdev;
	int r;

	bo = (struct ttm_buffer_object *)vma->vm_private_data;
	if (bo == NULL)
		return VM_FAULT_NOPAGE;
	qdev = qxl_get_qdev(bo->bdev);
	r = ttm_vm_ops->fault(vma, vmf);
	return r;
}

int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct qxl_device *qdev;
	int r;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
		pr_info("%s: vma->vm_pgoff (%lu) < DRM_FILE_PAGE_OFFSET\n",
			__func__, vma->vm_pgoff);
		return drm_mmap(filp, vma);
	}

	file_priv = filp->private_data;
	qdev = file_priv->minor->dev->dev_private;
	if (qdev == NULL) {
		DRM_ERROR("filp->private_data->minor->dev->dev_private == NULL\n");
		return -EINVAL;
	}
	QXL_INFO(qdev, "%s: filp->private_data = 0x%p, vma->vm_pgoff = %lx\n",
		 __func__, filp->private_data, vma->vm_pgoff);

	r = ttm_bo_mmap(filp, vma, &qdev->mman.bdev);
	if (unlikely(r != 0))
		return r;
	if (unlikely(ttm_vm_ops == NULL)) {
		/* Cache TTM's vm_ops on first use, then override ->fault. */
		ttm_vm_ops = vma->vm_ops;
		qxl_ttm_vm_ops = *ttm_vm_ops;
		qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
	}
	vma->vm_ops = &qxl_ttm_vm_ops;
	return 0;
}

static int qxl_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}

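/*
 * qxl exposes three placements: TTM_PL_SYSTEM for ordinary pages,
 * TTM_PL_VRAM for the main memory area (which also holds the primary
 * surface) and TTM_PL_PRIV0 for the surfaces area.  Both device
 * apertures are fixed-size and CPU-mappable.
 */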
static int qxl_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			     struct ttm_mem_type_manager *man)
{
	struct qxl_device *qdev;

	qdev = qxl_get_qdev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
	case TTM_PL_PRIV0:
		/* "On-card" video ram */
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = 0;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

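/*
 * On eviction a qxl BO is pushed back to the CPU domain; BOs that did
 * not originate from this driver fall back to cached system memory.
 */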
static void qxl_evict_flags(struct ttm_buffer_object *bo,
			    struct ttm_placement *placement)
{
	struct qxl_bo *qbo;
	static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;

	if (!qxl_ttm_bo_is_qxl_bo(bo)) {
		placement->fpfn = 0;
		placement->lpfn = 0;
		placement->placement = &placements;
		placement->busy_placement = &placements;
		placement->num_placement = 1;
		placement->num_busy_placement = 1;
		return;
	}
	qbo = container_of(bo, struct qxl_bo, tbo);
	qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU);
	*placement = qbo->placement;
}

static int qxl_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}

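/*
 * Translate a placement into a bus address so TTM can ioremap it: VRAM
 * and surface BOs live at fixed offsets behind the PCI BARs, while
 * system memory needs no aperture setup.
 */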
static int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
				  struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct qxl_device *qdev = qxl_get_qdev(bdev);

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_VRAM:
		mem->bus.is_iomem = true;
		mem->bus.base = qdev->vram_base;
		mem->bus.offset = mem->start << PAGE_SHIFT;
		break;
	case TTM_PL_PRIV0:
		mem->bus.is_iomem = true;
		mem->bus.base = qdev->surfaceram_base;
		mem->bus.offset = mem->start << PAGE_SHIFT;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void qxl_ttm_io_mem_free(struct ttm_bo_device *bdev,
				struct ttm_mem_reg *mem)
{
}

/*
 * TTM backend functions.
 */
struct qxl_ttm_tt {
	struct ttm_dma_tt		ttm;
	struct qxl_device		*qdev;
	u64				offset;
};

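/*
 * qxl has no GTT-like aperture to bind pages into, so the backend
 * bind/unbind hooks are stubs that fail if they are ever reached.
 */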
static int qxl_ttm_backend_bind(struct ttm_tt *ttm,
				struct ttm_mem_reg *bo_mem)
{
	struct qxl_ttm_tt *gtt = (void *)ttm;

	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
	if (!ttm->num_pages) {
		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
		     ttm->num_pages, bo_mem, ttm);
	}
	/* Not implemented */
	return -1;
}

static int qxl_ttm_backend_unbind(struct ttm_tt *ttm)
{
	/* Not implemented */
	return -1;
}

static void qxl_ttm_backend_destroy(struct ttm_tt *ttm)
{
	struct qxl_ttm_tt *gtt = (void *)ttm;

	ttm_dma_tt_fini(&gtt->ttm);
	kfree(gtt);
}

static struct ttm_backend_func qxl_backend_func = {
	.bind = &qxl_ttm_backend_bind,
	.unbind = &qxl_ttm_backend_unbind,
	.destroy = &qxl_ttm_backend_destroy,
};

static int qxl_ttm_tt_populate(struct ttm_tt *ttm)
{
	int r;

	if (ttm->state != tt_unpopulated)
		return 0;

	r = ttm_pool_populate(ttm);
	if (r)
		return r;

	return 0;
}

static void qxl_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	ttm_pool_unpopulate(ttm);
}

static struct ttm_tt *qxl_ttm_tt_create(struct ttm_bo_device *bdev,
					unsigned long size, uint32_t page_flags,
					struct page *dummy_read_page)
{
	struct qxl_device *qdev;
	struct qxl_ttm_tt *gtt;

	qdev = qxl_get_qdev(bdev);
	gtt = kzalloc(sizeof(struct qxl_ttm_tt), GFP_KERNEL);
	if (gtt == NULL)
		return NULL;
	gtt->ttm.ttm.func = &qxl_backend_func;
	gtt->qdev = qdev;
	if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags,
			    dummy_read_page)) {
		kfree(gtt);
		return NULL;
	}
	return &gtt->ttm.ttm;
}

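/*
 * A "null move" retargets a BO without copying: the new placement is
 * recorded straight into bo->mem, which is only valid while the old
 * placement has no backing mm_node.
 */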
static void qxl_move_null(struct ttm_buffer_object *bo,
			  struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	BUG_ON(old_mem->mm_node != NULL);
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
}

static int qxl_bo_move(struct ttm_buffer_object *bo,
		       bool evict, bool interruptible,
		       bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		qxl_move_null(bo, new_mem);
		return 0;
	}
	return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
}
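/*
 * Fence completion is driven by the host.  The wait loop escalates:
 * first flush the surface (or, on later passes, signal memory pressure
 * with qxl_io_notify_oom()), then poll the release ring via garbage
 * collection, backing off and retrying while drawable releases remain
 * outstanding.
 */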
static int qxl_sync_obj_wait(void *sync_obj,
			     bool lazy, bool interruptible)
{
	struct qxl_fence *qfence = (struct qxl_fence *)sync_obj;
	int count = 0, sc = 0;
	struct qxl_bo *bo = container_of(qfence, struct qxl_bo, fence);

	if (qfence->num_active_releases == 0)
		return 0;

retry:
	if (sc == 0) {
		if (bo->type == QXL_GEM_DOMAIN_SURFACE)
			qxl_update_surface(qfence->qdev, bo);
	} else if (sc >= 1) {
		qxl_io_notify_oom(qfence->qdev);
	}

	sc++;

	for (count = 0; count < 10; count++) {
		bool ret;

		ret = qxl_queue_garbage_collect(qfence->qdev, true);
		if (ret == false)
			break;

		if (qfence->num_active_releases == 0)
			return 0;
	}

	if (qfence->num_active_releases) {
		bool have_drawable_releases = false;
		void **slot;
		struct radix_tree_iter iter;
		int release_id;

		radix_tree_for_each_slot(slot, &qfence->tree, &iter, 0) {
			struct qxl_release *release;

			release_id = iter.index;
			release = qxl_release_from_id_locked(qfence->qdev, release_id);
			if (release == NULL)
				continue;

			if (release->type == QXL_RELEASE_DRAWABLE)
				have_drawable_releases = true;
		}

		qxl_queue_garbage_collect(qfence->qdev, true);

		if (have_drawable_releases || sc < 4) {
			if (sc > 2)
				/* back off */
				usleep_range(500, 1000);
			if (have_drawable_releases && sc > 300) {
				WARN(1, "sync obj %d still has outstanding releases %d %d %d %ld %d\n",
				     sc, bo->surface_id, bo->is_primary,
				     bo->pin_count,
				     (unsigned long)bo->gem_base.size,
				     qfence->num_active_releases);
				return -EBUSY;
			}
			goto retry;
		}
	}
	return 0;
}

static int qxl_sync_obj_flush(void *sync_obj)
{
	return 0;
}

static void qxl_sync_obj_unref(void **sync_obj)
{
}

static void *qxl_sync_obj_ref(void *sync_obj)
{
	return sync_obj;
}

static bool qxl_sync_obj_signaled(void *sync_obj)
{
	struct qxl_fence *qfence = (struct qxl_fence *)sync_obj;

	return (qfence->num_active_releases == 0);
}

static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
			       struct ttm_mem_reg *new_mem)
{
	struct qxl_bo *qbo;
	struct qxl_device *qdev;

	if (!qxl_ttm_bo_is_qxl_bo(bo))
		return;
	qbo = container_of(bo, struct qxl_bo, tbo);
	qdev = qbo->gem_base.dev->dev_private;

	if (bo->mem.mem_type == TTM_PL_PRIV0 && qbo->surface_id)
		qxl_surface_evict(qdev, qbo, new_mem ? true : false);
}

static struct ttm_bo_driver qxl_bo_driver = {
	.ttm_tt_create = &qxl_ttm_tt_create,
	.ttm_tt_populate = &qxl_ttm_tt_populate,
	.ttm_tt_unpopulate = &qxl_ttm_tt_unpopulate,
	.invalidate_caches = &qxl_invalidate_caches,
	.init_mem_type = &qxl_init_mem_type,
	.evict_flags = &qxl_evict_flags,
	.move = &qxl_bo_move,
	.verify_access = &qxl_verify_access,
	.io_mem_reserve = &qxl_ttm_io_mem_reserve,
	.io_mem_free = &qxl_ttm_io_mem_free,
	.sync_obj_signaled = &qxl_sync_obj_signaled,
	.sync_obj_wait = &qxl_sync_obj_wait,
	.sync_obj_flush = &qxl_sync_obj_flush,
	.sync_obj_unref = &qxl_sync_obj_unref,
	.sync_obj_ref = &qxl_sync_obj_ref,
	.move_notify = &qxl_bo_move_notify,
};
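/*
 * Device-wide TTM setup: take the global TTM references, initialize the
 * BO device, then size the two fixed heaps from the ROM layout (VRAM up
 * to the ram header, surfaces from surfaceram_size).
 */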
int qxl_ttm_init(struct qxl_device *qdev)
{
	int r;
	int num_io_pages; /* != rom->num_io_pages, we include surface0 */

	r = qxl_ttm_global_init(qdev);
	if (r)
		return r;
	/* No other user of the address space, so set it to 0 */
	r = ttm_bo_device_init(&qdev->mman.bdev,
			       qdev->mman.bo_global_ref.ref.object,
			       &qxl_bo_driver, DRM_FILE_PAGE_OFFSET, 0);
	if (r) {
		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
		return r;
	}
	/* NOTE: this includes the framebuffer (aka surface 0) */
	num_io_pages = qdev->rom->ram_header_offset / PAGE_SIZE;
	r = ttm_bo_init_mm(&qdev->mman.bdev, TTM_PL_VRAM,
			   num_io_pages);
	if (r) {
		DRM_ERROR("Failed initializing VRAM heap.\n");
		return r;
	}
	r = ttm_bo_init_mm(&qdev->mman.bdev, TTM_PL_PRIV0,
			   qdev->surfaceram_size / PAGE_SIZE);
	if (r) {
		DRM_ERROR("Failed initializing Surfaces heap.\n");
		return r;
	}
	DRM_INFO("qxl: %uM of VRAM memory size\n",
		 (unsigned)qdev->vram_size / (1024 * 1024));
	DRM_INFO("qxl: %luM of IO pages memory ready (VRAM domain)\n",
		 ((unsigned)num_io_pages * PAGE_SIZE) / (1024 * 1024));
	if (unlikely(qdev->mman.bdev.dev_mapping == NULL))
		qdev->mman.bdev.dev_mapping = qdev->ddev->dev_mapping;
	r = qxl_ttm_debugfs_init(qdev);
	if (r) {
		DRM_ERROR("Failed to init debugfs\n");
		return r;
	}
	return 0;
}

void qxl_ttm_fini(struct qxl_device *qdev)
{
	ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&qdev->mman.bdev, TTM_PL_PRIV0);
	ttm_bo_device_release(&qdev->mman.bdev);
	qxl_ttm_global_fini(qdev);
	DRM_INFO("qxl: ttm finalized\n");
}
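/*
 * debugfs exposes one file per heap (qxl_mem_mm and qxl_surf_mm), each
 * dumping the drm_mm allocator state of the corresponding TTM manager.
 */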
#define QXL_DEBUGFS_MEM_TYPES 2

#if defined(CONFIG_DEBUG_FS)
static int qxl_mm_dump_table(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_mm *mm = (struct drm_mm *)node->info_ent->data;
	struct drm_device *dev = node->minor->dev;
	struct qxl_device *rdev = dev->dev_private;
	int ret;
	struct ttm_bo_global *glob = rdev->mman.bdev.glob;

	spin_lock(&glob->lru_lock);
	ret = drm_mm_dump_table(m, mm);
	spin_unlock(&glob->lru_lock);
	return ret;
}
#endif

static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
{
#if defined(CONFIG_DEBUG_FS)
	static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
	static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
	unsigned i;

	for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
		if (i == 0)
			sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
		else
			sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
		qxl_mem_types_list[i].name = qxl_mem_types_names[i];
		qxl_mem_types_list[i].show = &qxl_mm_dump_table;
		qxl_mem_types_list[i].driver_features = 0;
		if (i == 0)
			qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
		else
			qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
	}
	return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
#else
	return 0;
#endif
}