drm/radeon: add Mullins VCE support
[deliverable/linux.git] / drivers / gpu / drm / radeon / radeon_vce.c
CommitLineData
d93f7937
CK
1/*
2 * Copyright 2013 Advanced Micro Devices, Inc.
3 * All Rights Reserved.
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the
7 * "Software"), to deal in the Software without restriction, including
8 * without limitation the rights to use, copy, modify, merge, publish,
9 * distribute, sub license, and/or sell copies of the Software, and to
10 * permit persons to whom the Software is furnished to do so, subject to
11 * the following conditions:
12 *
13 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
16 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
17 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
18 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
19 * USE OR OTHER DEALINGS IN THE SOFTWARE.
20 *
21 * The above copyright notice and this permission notice (including the
22 * next paragraph) shall be included in all copies or substantial portions
23 * of the Software.
24 *
25 * Authors: Christian König <christian.koenig@amd.com>
26 */
27
28#include <linux/firmware.h>
29#include <linux/module.h>
30#include <drm/drmP.h>
31#include <drm/drm.h>
32
33#include "radeon.h"
34#include "radeon_asic.h"
35#include "sid.h"
36
03afe6f6
AD
37/* 1 second timeout */
38#define VCE_IDLE_TIMEOUT_MS 1000
39
d93f7937
CK
40/* Firmware Names */
41#define FIRMWARE_BONAIRE "radeon/BONAIRE_vce.bin"
42
43MODULE_FIRMWARE(FIRMWARE_BONAIRE);
44
03afe6f6
AD
45static void radeon_vce_idle_work_handler(struct work_struct *work);
46
d93f7937
CK
47/**
48 * radeon_vce_init - allocate memory, load vce firmware
49 *
50 * @rdev: radeon_device pointer
51 *
52 * First step to get VCE online, allocate memory and load the firmware
53 */
54int radeon_vce_init(struct radeon_device *rdev)
55{
98ccc291
CK
56 static const char *fw_version = "[ATI LIB=VCEFW,";
57 static const char *fb_version = "[ATI LIB=VCEFWSTATS,";
58 unsigned long size;
59 const char *fw_name, *c;
60 uint8_t start, mid, end;
d93f7937
CK
61 int i, r;
62
03afe6f6
AD
63 INIT_DELAYED_WORK(&rdev->vce.idle_work, radeon_vce_idle_work_handler);
64
d93f7937
CK
65 switch (rdev->family) {
66 case CHIP_BONAIRE:
67 case CHIP_KAVERI:
68 case CHIP_KABINI:
428beddd 69 case CHIP_MULLINS:
d93f7937
CK
70 fw_name = FIRMWARE_BONAIRE;
71 break;
72
73 default:
74 return -EINVAL;
75 }
76
77 r = request_firmware(&rdev->vce_fw, fw_name, rdev->dev);
78 if (r) {
79 dev_err(rdev->dev, "radeon_vce: Can't load firmware \"%s\"\n",
80 fw_name);
81 return r;
82 }
83
98ccc291
CK
84 /* search for firmware version */
85
86 size = rdev->vce_fw->size - strlen(fw_version) - 9;
87 c = rdev->vce_fw->data;
88 for (;size > 0; --size, ++c)
89 if (strncmp(c, fw_version, strlen(fw_version)) == 0)
90 break;
91
92 if (size == 0)
93 return -EINVAL;
94
95 c += strlen(fw_version);
96 if (sscanf(c, "%2hhd.%2hhd.%2hhd]", &start, &mid, &end) != 3)
97 return -EINVAL;
98
99 /* search for feedback version */
100
101 size = rdev->vce_fw->size - strlen(fb_version) - 3;
102 c = rdev->vce_fw->data;
103 for (;size > 0; --size, ++c)
104 if (strncmp(c, fb_version, strlen(fb_version)) == 0)
105 break;
106
107 if (size == 0)
108 return -EINVAL;
109
110 c += strlen(fb_version);
111 if (sscanf(c, "%2u]", &rdev->vce.fb_version) != 1)
112 return -EINVAL;
113
114 DRM_INFO("Found VCE firmware/feedback version %hhd.%hhd.%hhd / %d!\n",
115 start, mid, end, rdev->vce.fb_version);
116
117 rdev->vce.fw_version = (start << 24) | (mid << 16) | (end << 8);
118
119 /* we can only work with this fw version for now */
120 if (rdev->vce.fw_version != ((40 << 24) | (2 << 16) | (2 << 8)))
121 return -EINVAL;
122
b03b4e4b 123 /* allocate firmware, stack and heap BO */
98ccc291
CK
124
125 size = RADEON_GPU_PAGE_ALIGN(rdev->vce_fw->size) +
126 RADEON_VCE_STACK_SIZE + RADEON_VCE_HEAP_SIZE;
127 r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
d93f7937
CK
128 RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->vce.vcpu_bo);
129 if (r) {
130 dev_err(rdev->dev, "(%d) failed to allocate VCE bo\n", r);
131 return r;
132 }
133
b03b4e4b
CK
134 r = radeon_bo_reserve(rdev->vce.vcpu_bo, false);
135 if (r) {
136 radeon_bo_unref(&rdev->vce.vcpu_bo);
137 dev_err(rdev->dev, "(%d) failed to reserve VCE bo\n", r);
d93f7937 138 return r;
b03b4e4b 139 }
d93f7937 140
b03b4e4b
CK
141 r = radeon_bo_pin(rdev->vce.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
142 &rdev->vce.gpu_addr);
143 radeon_bo_unreserve(rdev->vce.vcpu_bo);
144 if (r) {
145 radeon_bo_unref(&rdev->vce.vcpu_bo);
146 dev_err(rdev->dev, "(%d) VCE bo pin failed\n", r);
d93f7937 147 return r;
b03b4e4b 148 }
d93f7937
CK
149
150 for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
151 atomic_set(&rdev->vce.handles[i], 0);
152 rdev->vce.filp[i] = NULL;
153 }
154
155 return 0;
156}
157
158/**
159 * radeon_vce_fini - free memory
160 *
161 * @rdev: radeon_device pointer
162 *
163 * Last step on VCE teardown, free firmware memory
164 */
165void radeon_vce_fini(struct radeon_device *rdev)
166{
b03b4e4b
CK
167 if (rdev->vce.vcpu_bo == NULL)
168 return;
169
d93f7937 170 radeon_bo_unref(&rdev->vce.vcpu_bo);
b03b4e4b
CK
171
172 release_firmware(rdev->vce_fw);
d93f7937
CK
173}
174
175/**
176 * radeon_vce_suspend - unpin VCE fw memory
177 *
178 * @rdev: radeon_device pointer
179 *
d93f7937
CK
180 */
181int radeon_vce_suspend(struct radeon_device *rdev)
182{
b03b4e4b 183 int i;
d93f7937
CK
184
185 if (rdev->vce.vcpu_bo == NULL)
186 return 0;
187
b03b4e4b
CK
188 for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i)
189 if (atomic_read(&rdev->vce.handles[i]))
190 break;
191
192 if (i == RADEON_MAX_VCE_HANDLES)
193 return 0;
194
195 /* TODO: suspending running encoding sessions isn't supported */
196 return -EINVAL;
d93f7937
CK
197}
198
199/**
200 * radeon_vce_resume - pin VCE fw memory
201 *
202 * @rdev: radeon_device pointer
203 *
d93f7937
CK
204 */
205int radeon_vce_resume(struct radeon_device *rdev)
206{
b03b4e4b 207 void *cpu_addr;
d93f7937
CK
208 int r;
209
210 if (rdev->vce.vcpu_bo == NULL)
211 return -EINVAL;
212
213 r = radeon_bo_reserve(rdev->vce.vcpu_bo, false);
214 if (r) {
d93f7937
CK
215 dev_err(rdev->dev, "(%d) failed to reserve VCE bo\n", r);
216 return r;
217 }
218
b03b4e4b 219 r = radeon_bo_kmap(rdev->vce.vcpu_bo, &cpu_addr);
d93f7937
CK
220 if (r) {
221 radeon_bo_unreserve(rdev->vce.vcpu_bo);
d93f7937
CK
222 dev_err(rdev->dev, "(%d) VCE map failed\n", r);
223 return r;
224 }
225
b03b4e4b
CK
226 memcpy(cpu_addr, rdev->vce_fw->data, rdev->vce_fw->size);
227
228 radeon_bo_kunmap(rdev->vce.vcpu_bo);
229
d93f7937
CK
230 radeon_bo_unreserve(rdev->vce.vcpu_bo);
231
232 return 0;
233}
234
03afe6f6
AD
235/**
236 * radeon_vce_idle_work_handler - power off VCE
237 *
238 * @work: pointer to work structure
239 *
240 * power of VCE when it's not used any more
241 */
242static void radeon_vce_idle_work_handler(struct work_struct *work)
243{
244 struct radeon_device *rdev =
245 container_of(work, struct radeon_device, vce.idle_work.work);
246
247 if ((radeon_fence_count_emitted(rdev, TN_RING_TYPE_VCE1_INDEX) == 0) &&
248 (radeon_fence_count_emitted(rdev, TN_RING_TYPE_VCE2_INDEX) == 0)) {
249 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
250 radeon_dpm_enable_vce(rdev, false);
251 } else {
252 radeon_set_vce_clocks(rdev, 0, 0);
253 }
254 } else {
255 schedule_delayed_work(&rdev->vce.idle_work,
256 msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));
257 }
258}
259
260/**
261 * radeon_vce_note_usage - power up VCE
262 *
263 * @rdev: radeon_device pointer
264 *
265 * Make sure VCE is powerd up when we want to use it
266 */
267void radeon_vce_note_usage(struct radeon_device *rdev)
268{
269 bool streams_changed = false;
270 bool set_clocks = !cancel_delayed_work_sync(&rdev->vce.idle_work);
271 set_clocks &= schedule_delayed_work(&rdev->vce.idle_work,
272 msecs_to_jiffies(VCE_IDLE_TIMEOUT_MS));
273
274 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
275 /* XXX figure out if the streams changed */
276 streams_changed = false;
277 }
278
279 if (set_clocks || streams_changed) {
280 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
281 radeon_dpm_enable_vce(rdev, true);
282 } else {
283 radeon_set_vce_clocks(rdev, 53300, 40000);
284 }
285 }
286}
287
d93f7937
CK
288/**
289 * radeon_vce_free_handles - free still open VCE handles
290 *
291 * @rdev: radeon_device pointer
292 * @filp: drm file pointer
293 *
294 * Close all VCE handles still open by this file pointer
295 */
296void radeon_vce_free_handles(struct radeon_device *rdev, struct drm_file *filp)
297{
298 int i, r;
299 for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
300 uint32_t handle = atomic_read(&rdev->vce.handles[i]);
301 if (!handle || rdev->vce.filp[i] != filp)
302 continue;
303
03afe6f6
AD
304 radeon_vce_note_usage(rdev);
305
d93f7937
CK
306 r = radeon_vce_get_destroy_msg(rdev, TN_RING_TYPE_VCE1_INDEX,
307 handle, NULL);
308 if (r)
309 DRM_ERROR("Error destroying VCE handle (%d)!\n", r);
310
311 rdev->vce.filp[i] = NULL;
312 atomic_set(&rdev->vce.handles[i], 0);
313 }
314}
315
/**
 * radeon_vce_get_create_msg - generate a VCE create msg
 *
 * @rdev: radeon_device pointer
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Open up a stream for HW test. Builds a firmware command stream (session
 * + create + feedback buffer) in an IB and schedules it; the exact dword
 * layout below is dictated by the VCE firmware and must not be reordered.
 */
int radeon_vce_get_create_msg(struct radeon_device *rdev, int ring,
			      uint32_t handle, struct radeon_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct radeon_ib ib;
	uint64_t dummy;
	int i, r;

	r = radeon_ib_get(rdev, ring, &ib, NULL, ib_size_dw * 4);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		return r;
	}

	/* scratch GPU address used as the feedback buffer for this test */
	dummy = ib.gpu_addr + 1024;

	/* stitch together an VCE create msg */
	ib.length_dw = 0;
	ib.ptr[ib.length_dw++] = 0x0000000c; /* len */
	ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */
	ib.ptr[ib.length_dw++] = handle;

	ib.ptr[ib.length_dw++] = 0x00000030; /* len */
	ib.ptr[ib.length_dw++] = 0x01000001; /* create cmd */
	ib.ptr[ib.length_dw++] = 0x00000000;
	ib.ptr[ib.length_dw++] = 0x00000042;
	ib.ptr[ib.length_dw++] = 0x0000000a;
	ib.ptr[ib.length_dw++] = 0x00000001;
	ib.ptr[ib.length_dw++] = 0x00000080;
	ib.ptr[ib.length_dw++] = 0x00000060;
	ib.ptr[ib.length_dw++] = 0x00000100;
	ib.ptr[ib.length_dw++] = 0x00000100;
	ib.ptr[ib.length_dw++] = 0x0000000c;
	ib.ptr[ib.length_dw++] = 0x00000000;

	ib.ptr[ib.length_dw++] = 0x00000014; /* len */
	ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */
	ib.ptr[ib.length_dw++] = upper_32_bits(dummy);
	ib.ptr[ib.length_dw++] = dummy;      /* low 32 bits (implicit truncation) */
	ib.ptr[ib.length_dw++] = 0x00000001;

	/* pad the rest of the IB with zeros (NOPs for the firmware) */
	for (i = ib.length_dw; i < ib_size_dw; ++i)
		ib.ptr[i] = 0x0;

	r = radeon_ib_schedule(rdev, &ib, NULL);
	if (r) {
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
	}

	/* hand back a reference on the IB's fence if the caller wants one */
	if (fence)
		*fence = radeon_fence_ref(ib.fence);

	radeon_ib_free(rdev, &ib);

	return r;
}
382
/**
 * radeon_vce_get_destroy_msg - generate a VCE destroy msg
 *
 * @rdev: radeon_device pointer
 * @ring: ring we should submit the msg to
 * @handle: VCE session handle to use
 * @fence: optional fence to return
 *
 * Close up a stream for HW test or if userspace failed to do so. The
 * command stream layout (session + feedback buffer + destroy) is dictated
 * by the VCE firmware and must not be reordered.
 */
int radeon_vce_get_destroy_msg(struct radeon_device *rdev, int ring,
			       uint32_t handle, struct radeon_fence **fence)
{
	const unsigned ib_size_dw = 1024;
	struct radeon_ib ib;
	uint64_t dummy;
	int i, r;

	r = radeon_ib_get(rdev, ring, &ib, NULL, ib_size_dw * 4);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		return r;
	}

	/* scratch GPU address used as the feedback buffer for this test */
	dummy = ib.gpu_addr + 1024;

	/* stitch together an VCE destroy msg */
	ib.length_dw = 0;
	ib.ptr[ib.length_dw++] = 0x0000000c; /* len */
	ib.ptr[ib.length_dw++] = 0x00000001; /* session cmd */
	ib.ptr[ib.length_dw++] = handle;

	ib.ptr[ib.length_dw++] = 0x00000014; /* len */
	ib.ptr[ib.length_dw++] = 0x05000005; /* feedback buffer */
	ib.ptr[ib.length_dw++] = upper_32_bits(dummy);
	ib.ptr[ib.length_dw++] = dummy;      /* low 32 bits (implicit truncation) */
	ib.ptr[ib.length_dw++] = 0x00000001;

	ib.ptr[ib.length_dw++] = 0x00000008; /* len */
	ib.ptr[ib.length_dw++] = 0x02000001; /* destroy cmd */

	/* pad the rest of the IB with zeros (NOPs for the firmware) */
	for (i = ib.length_dw; i < ib_size_dw; ++i)
		ib.ptr[i] = 0x0;

	r = radeon_ib_schedule(rdev, &ib, NULL);
	if (r) {
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
	}

	/* hand back a reference on the IB's fence if the caller wants one */
	if (fence)
		*fence = radeon_fence_ref(ib.fence);

	radeon_ib_free(rdev, &ib);

	return r;
}
439
440/**
441 * radeon_vce_cs_reloc - command submission relocation
442 *
443 * @p: parser context
444 * @lo: address of lower dword
445 * @hi: address of higher dword
446 *
447 * Patch relocation inside command stream with real buffer address
448 */
449int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi)
450{
451 struct radeon_cs_chunk *relocs_chunk;
452 uint64_t offset;
453 unsigned idx;
454
455 relocs_chunk = &p->chunks[p->chunk_relocs_idx];
456 offset = radeon_get_ib_value(p, lo);
457 idx = radeon_get_ib_value(p, hi);
458
459 if (idx >= relocs_chunk->length_dw) {
460 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
461 idx, relocs_chunk->length_dw);
462 return -EINVAL;
463 }
464
df0af440 465 offset += p->relocs_ptr[(idx / 4)]->gpu_offset;
d93f7937
CK
466
467 p->ib.ptr[lo] = offset & 0xFFFFFFFF;
468 p->ib.ptr[hi] = offset >> 32;
469
470 return 0;
471}
472
/**
 * radeon_vce_cs_parse - parse and validate the command stream
 *
 * @p: parser context
 *
 * Walks the IB, validates each VCE command, patches buffer relocations,
 * and tracks session handles: a destroy message frees the handle, while
 * create/encode validates it or allocates a new slot for it.
 * Returns 0 on success, negative error code on invalid commands or when
 * no free handle slot is available.
 */
int radeon_vce_cs_parse(struct radeon_cs_parser *p)
{
	uint32_t handle = 0;
	bool destroy = false;
	int i, r;

	while (p->idx < p->chunks[p->chunk_ib_idx].length_dw) {
		/* each command is: length (bytes), opcode, payload... */
		uint32_t len = radeon_get_ib_value(p, p->idx);
		uint32_t cmd = radeon_get_ib_value(p, p->idx + 1);

		/* minimum is len + cmd; lengths must be dword aligned */
		if ((len < 8) || (len & 3)) {
			DRM_ERROR("invalid VCE command length (%d)!\n", len);
			return -EINVAL;
		}

		switch (cmd) {
		case 0x00000001: // session
			handle = radeon_get_ib_value(p, p->idx + 2);
			break;

		/* commands with no buffer addresses: nothing to patch */
		case 0x00000002: // task info
		case 0x01000001: // create
		case 0x04000001: // config extension
		case 0x04000002: // pic control
		case 0x04000005: // rate control
		case 0x04000007: // motion estimation
		case 0x04000008: // rdo
			break;

		case 0x03000001: // encode
			/* patch the two buffer addresses (lo dword, hi dword) */
			r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9);
			if (r)
				return r;

			r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11);
			if (r)
				return r;
			break;

		case 0x02000001: // destroy
			destroy = true;
			break;

		case 0x05000001: // context buffer
		case 0x05000004: // video bitstream buffer
		case 0x05000005: // feedback buffer
			r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2);
			if (r)
				return r;
			break;

		default:
			DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
			return -EINVAL;
		}

		/* len is in bytes, idx counts dwords */
		p->idx += len / 4;
	}

	if (destroy) {
		/* IB contains a destroy msg, free the handle */
		for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i)
			atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0);

		return 0;
	}

	/* create or encode, validate the handle */
	for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
		if (atomic_read(&p->rdev->vce.handles[i]) == handle)
			return 0;
	}

	/* handle not found try to alloc a new one */
	for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
		/* cmpxchg claims an empty (zero) slot atomically */
		if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) {
			p->rdev->vce.filp[i] = p->filp;
			return 0;
		}
	}

	DRM_ERROR("No more free VCE handles!\n");
	return -EINVAL;
}
563
564/**
565 * radeon_vce_semaphore_emit - emit a semaphore command
566 *
567 * @rdev: radeon_device pointer
568 * @ring: engine to use
569 * @semaphore: address of semaphore
570 * @emit_wait: true=emit wait, false=emit signal
571 *
572 */
573bool radeon_vce_semaphore_emit(struct radeon_device *rdev,
574 struct radeon_ring *ring,
575 struct radeon_semaphore *semaphore,
576 bool emit_wait)
577{
578 uint64_t addr = semaphore->gpu_addr;
579
580 radeon_ring_write(ring, VCE_CMD_SEMAPHORE);
581 radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
582 radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
583 radeon_ring_write(ring, 0x01003000 | (emit_wait ? 1 : 0));
584 if (!emit_wait)
585 radeon_ring_write(ring, VCE_CMD_END);
586
587 return true;
588}
589
590/**
591 * radeon_vce_ib_execute - execute indirect buffer
592 *
593 * @rdev: radeon_device pointer
594 * @ib: the IB to execute
595 *
596 */
597void radeon_vce_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
598{
599 struct radeon_ring *ring = &rdev->ring[ib->ring];
600 radeon_ring_write(ring, VCE_CMD_IB);
601 radeon_ring_write(ring, ib->gpu_addr);
602 radeon_ring_write(ring, upper_32_bits(ib->gpu_addr));
603 radeon_ring_write(ring, ib->length_dw);
604}
605
606/**
607 * radeon_vce_fence_emit - add a fence command to the ring
608 *
609 * @rdev: radeon_device pointer
610 * @fence: the fence
611 *
612 */
613void radeon_vce_fence_emit(struct radeon_device *rdev,
614 struct radeon_fence *fence)
615{
616 struct radeon_ring *ring = &rdev->ring[fence->ring];
681941c1 617 uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr;
d93f7937
CK
618
619 radeon_ring_write(ring, VCE_CMD_FENCE);
620 radeon_ring_write(ring, addr);
621 radeon_ring_write(ring, upper_32_bits(addr));
622 radeon_ring_write(ring, fence->seq);
623 radeon_ring_write(ring, VCE_CMD_TRAP);
624 radeon_ring_write(ring, VCE_CMD_END);
625}
626
627/**
628 * radeon_vce_ring_test - test if VCE ring is working
629 *
630 * @rdev: radeon_device pointer
631 * @ring: the engine to test on
632 *
633 */
634int radeon_vce_ring_test(struct radeon_device *rdev, struct radeon_ring *ring)
635{
636 uint32_t rptr = vce_v1_0_get_rptr(rdev, ring);
637 unsigned i;
638 int r;
639
640 r = radeon_ring_lock(rdev, ring, 16);
641 if (r) {
642 DRM_ERROR("radeon: vce failed to lock ring %d (%d).\n",
643 ring->idx, r);
644 return r;
645 }
646 radeon_ring_write(ring, VCE_CMD_END);
647 radeon_ring_unlock_commit(rdev, ring);
648
649 for (i = 0; i < rdev->usec_timeout; i++) {
650 if (vce_v1_0_get_rptr(rdev, ring) != rptr)
651 break;
652 DRM_UDELAY(1);
653 }
654
655 if (i < rdev->usec_timeout) {
656 DRM_INFO("ring test on %d succeeded in %d usecs\n",
657 ring->idx, i);
658 } else {
659 DRM_ERROR("radeon: ring %d test failed\n",
660 ring->idx);
661 r = -ETIMEDOUT;
662 }
663
664 return r;
665}
666
667/**
668 * radeon_vce_ib_test - test if VCE IBs are working
669 *
670 * @rdev: radeon_device pointer
671 * @ring: the engine to test on
672 *
673 */
674int radeon_vce_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
675{
676 struct radeon_fence *fence = NULL;
677 int r;
678
679 r = radeon_vce_get_create_msg(rdev, ring->idx, 1, NULL);
680 if (r) {
681 DRM_ERROR("radeon: failed to get create msg (%d).\n", r);
682 goto error;
683 }
684
685 r = radeon_vce_get_destroy_msg(rdev, ring->idx, 1, &fence);
686 if (r) {
687 DRM_ERROR("radeon: failed to get destroy ib (%d).\n", r);
688 goto error;
689 }
690
691 r = radeon_fence_wait(fence, false);
692 if (r) {
693 DRM_ERROR("radeon: fence wait failed (%d).\n", r);
694 } else {
695 DRM_INFO("ib test on ring %d succeeded\n", ring->idx);
696 }
697error:
698 radeon_fence_unref(&fence);
699 return r;
700}
This page took 0.06125 seconds and 5 git commands to generate.