/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <deathsimple@vodafone.de>
 */
30 | ||
31 | #include <linux/firmware.h> | |
32 | #include <linux/module.h> | |
33 | #include <drm/drmP.h> | |
34 | #include <drm/drm.h> | |
35 | ||
36 | #include "amdgpu.h" | |
37 | #include "amdgpu_pm.h" | |
38 | #include "amdgpu_uvd.h" | |
39 | #include "cikd.h" | |
40 | #include "uvd/uvd_4_2_d.h" | |
41 | ||
42 | /* 1 second timeout */ | |
43 | #define UVD_IDLE_TIMEOUT_MS 1000 | |
44 | ||
45 | /* Firmware Names */ | |
46 | #ifdef CONFIG_DRM_AMDGPU_CIK | |
47 | #define FIRMWARE_BONAIRE "radeon/bonaire_uvd.bin" | |
48 | #define FIRMWARE_KABINI "radeon/kabini_uvd.bin" | |
49 | #define FIRMWARE_KAVERI "radeon/kaveri_uvd.bin" | |
50 | #define FIRMWARE_HAWAII "radeon/hawaii_uvd.bin" | |
51 | #define FIRMWARE_MULLINS "radeon/mullins_uvd.bin" | |
52 | #endif | |
c65444fe JZ |
53 | #define FIRMWARE_TONGA "amdgpu/tonga_uvd.bin" |
54 | #define FIRMWARE_CARRIZO "amdgpu/carrizo_uvd.bin" | |
974ee3db | 55 | #define FIRMWARE_FIJI "amdgpu/fiji_uvd.bin" |
d38ceaf9 AD |
56 | |
/**
 * amdgpu_uvd_cs_ctx - Command submission parser context
 *
 * Used for emulating virtual memory support on UVD 4.2.
 */
struct amdgpu_uvd_cs_ctx {
	struct amdgpu_cs_parser *parser;
	unsigned reg, count;
	unsigned data0, data1;
	unsigned idx;
	unsigned ib_idx;

	/* does the IB have a msg command */
	bool has_msg_cmd;

	/* minimum buffer sizes */
	unsigned *buf_sizes;
};
75 | ||
76 | #ifdef CONFIG_DRM_AMDGPU_CIK | |
77 | MODULE_FIRMWARE(FIRMWARE_BONAIRE); | |
78 | MODULE_FIRMWARE(FIRMWARE_KABINI); | |
79 | MODULE_FIRMWARE(FIRMWARE_KAVERI); | |
80 | MODULE_FIRMWARE(FIRMWARE_HAWAII); | |
81 | MODULE_FIRMWARE(FIRMWARE_MULLINS); | |
82 | #endif | |
83 | MODULE_FIRMWARE(FIRMWARE_TONGA); | |
84 | MODULE_FIRMWARE(FIRMWARE_CARRIZO); | |
974ee3db | 85 | MODULE_FIRMWARE(FIRMWARE_FIJI); |
d38ceaf9 AD |
86 | |
87 | static void amdgpu_uvd_note_usage(struct amdgpu_device *adev); | |
88 | static void amdgpu_uvd_idle_work_handler(struct work_struct *work); | |
89 | ||
int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
{
	unsigned long bo_size;
	const char *fw_name;
	const struct common_firmware_header *hdr;
	unsigned version_major, version_minor, family_id;
	int i, r;

	INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
		fw_name = FIRMWARE_BONAIRE;
		break;
	case CHIP_KABINI:
		fw_name = FIRMWARE_KABINI;
		break;
	case CHIP_KAVERI:
		fw_name = FIRMWARE_KAVERI;
		break;
	case CHIP_HAWAII:
		fw_name = FIRMWARE_HAWAII;
		break;
	case CHIP_MULLINS:
		fw_name = FIRMWARE_MULLINS;
		break;
#endif
	case CHIP_TONGA:
		fw_name = FIRMWARE_TONGA;
		break;
	case CHIP_FIJI:
		fw_name = FIRMWARE_FIJI;
		break;
	case CHIP_CARRIZO:
		fw_name = FIRMWARE_CARRIZO;
		break;
	default:
		return -EINVAL;
	}

	r = request_firmware(&adev->uvd.fw, fw_name, adev->dev);
	if (r) {
		dev_err(adev->dev, "amdgpu_uvd: Can't load firmware \"%s\"\n",
			fw_name);
		return r;
	}

	r = amdgpu_ucode_validate(adev->uvd.fw);
	if (r) {
		dev_err(adev->dev, "amdgpu_uvd: Can't validate firmware \"%s\"\n",
			fw_name);
		release_firmware(adev->uvd.fw);
		adev->uvd.fw = NULL;
		return r;
	}

	hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
	family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
	version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
	version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
	DRM_INFO("Found UVD firmware Version: %u.%u Family ID: %u\n",
		 version_major, version_minor, family_id);

	bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
		  + AMDGPU_UVD_STACK_SIZE + AMDGPU_UVD_HEAP_SIZE;
	r = amdgpu_bo_create(adev, bo_size, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &adev->uvd.vcpu_bo);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate UVD bo\n", r);
		return r;
	}

	r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
	if (r) {
		amdgpu_bo_unref(&adev->uvd.vcpu_bo);
		dev_err(adev->dev, "(%d) failed to reserve UVD bo\n", r);
		return r;
	}

	r = amdgpu_bo_pin(adev->uvd.vcpu_bo, AMDGPU_GEM_DOMAIN_VRAM,
			  &adev->uvd.gpu_addr);
	if (r) {
		amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
		amdgpu_bo_unref(&adev->uvd.vcpu_bo);
		dev_err(adev->dev, "(%d) UVD bo pin failed\n", r);
		return r;
	}

	r = amdgpu_bo_kmap(adev->uvd.vcpu_bo, &adev->uvd.cpu_addr);
	if (r) {
		/* don't leak the reservation or the BO on error */
		amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
		amdgpu_bo_unref(&adev->uvd.vcpu_bo);
		dev_err(adev->dev, "(%d) UVD map failed\n", r);
		return r;
	}

	amdgpu_bo_unreserve(adev->uvd.vcpu_bo);

	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
		atomic_set(&adev->uvd.handles[i], 0);
		adev->uvd.filp[i] = NULL;
	}

	/* from UVD v5.0 HW addressing capacity increased to 64 bits */
	if (!amdgpu_ip_block_version_cmp(adev, AMD_IP_BLOCK_TYPE_UVD, 5, 0))
		adev->uvd.address_64_bit = true;

	return 0;
}

int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
{
	int r;

	if (adev->uvd.vcpu_bo == NULL)
		return 0;

	r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false);
	if (!r) {
		amdgpu_bo_kunmap(adev->uvd.vcpu_bo);
		amdgpu_bo_unpin(adev->uvd.vcpu_bo);
		amdgpu_bo_unreserve(adev->uvd.vcpu_bo);
	}

	amdgpu_bo_unref(&adev->uvd.vcpu_bo);

	amdgpu_ring_fini(&adev->uvd.ring);

	release_firmware(adev->uvd.fw);

	return 0;
}

int amdgpu_uvd_suspend(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	const struct common_firmware_header *hdr;
	int i;

	if (adev->uvd.vcpu_bo == NULL)
		return 0;

	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
		if (atomic_read(&adev->uvd.handles[i]))
			break;

	if (i == AMDGPU_MAX_UVD_HANDLES)
		return 0;

	hdr = (const struct common_firmware_header *)adev->uvd.fw->data;

	size = amdgpu_bo_size(adev->uvd.vcpu_bo);
	size -= le32_to_cpu(hdr->ucode_size_bytes);

	ptr = adev->uvd.cpu_addr;
	ptr += le32_to_cpu(hdr->ucode_size_bytes);

	adev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
	if (!adev->uvd.saved_bo)
		return -ENOMEM;

	memcpy(adev->uvd.saved_bo, ptr, size);

	return 0;
}

int amdgpu_uvd_resume(struct amdgpu_device *adev)
{
	unsigned size;
	void *ptr;
	const struct common_firmware_header *hdr;
	unsigned offset;

	if (adev->uvd.vcpu_bo == NULL)
		return -EINVAL;

	hdr = (const struct common_firmware_header *)adev->uvd.fw->data;
	offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
	memcpy(adev->uvd.cpu_addr, (adev->uvd.fw->data) + offset,
	       (adev->uvd.fw->size) - offset);

	size = amdgpu_bo_size(adev->uvd.vcpu_bo);
	size -= le32_to_cpu(hdr->ucode_size_bytes);
	ptr = adev->uvd.cpu_addr;
	ptr += le32_to_cpu(hdr->ucode_size_bytes);

	if (adev->uvd.saved_bo != NULL) {
		memcpy(ptr, adev->uvd.saved_bo, size);
		kfree(adev->uvd.saved_bo);
		adev->uvd.saved_bo = NULL;
	} else {
		memset(ptr, 0, size);
	}

	return 0;
}

void amdgpu_uvd_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
{
	struct amdgpu_ring *ring = &adev->uvd.ring;
	int i, r;

	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
		uint32_t handle = atomic_read(&adev->uvd.handles[i]);
		if (handle != 0 && adev->uvd.filp[i] == filp) {
			struct amdgpu_fence *fence;

			amdgpu_uvd_note_usage(adev);

			r = amdgpu_uvd_get_destroy_msg(ring, handle, &fence);
			if (r) {
				DRM_ERROR("Error destroying UVD (%d)!\n", r);
				continue;
			}

			amdgpu_fence_wait(fence, false);
			amdgpu_fence_unref(&fence);

			adev->uvd.filp[i] = NULL;
			atomic_set(&adev->uvd.handles[i], 0);
		}
	}
}

static void amdgpu_uvd_force_into_uvd_segment(struct amdgpu_bo *rbo)
{
	int i;

	for (i = 0; i < rbo->placement.num_placement; ++i) {
		rbo->placements[i].fpfn = 0 >> PAGE_SHIFT;
		rbo->placements[i].lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
	}
}

/**
 * amdgpu_uvd_cs_pass1 - first parsing round
 *
 * @ctx: UVD parser context
 *
 * Make sure UVD message and feedback buffers are in VRAM and
 * nobody is violating a 256MB boundary.
 */
static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	uint32_t cmd, lo, hi;
	uint64_t addr;
	int r = 0;

	lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0);
	hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1);
	addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);

	mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
	if (mapping == NULL) {
		DRM_ERROR("Can't find BO for addr 0x%08Lx\n", addr);
		return -EINVAL;
	}

	if (!ctx->parser->adev->uvd.address_64_bit) {
		/* check if it's a message or feedback command */
		cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
		if (cmd == 0x0 || cmd == 0x3) {
			/* yes, force it into VRAM */
			uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;
			amdgpu_ttm_placement_from_domain(bo, domain);
		}
		amdgpu_uvd_force_into_uvd_segment(bo);

		r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	}

	return r;
}

/**
 * amdgpu_uvd_cs_msg_decode - handle UVD decode message
 *
 * @msg: pointer to message structure
 * @buf_sizes: returned buffer sizes
 *
 * Peek into the decode message and calculate the necessary buffer sizes.
 */
static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
{
	unsigned stream_type = msg[4];
	unsigned width = msg[6];
	unsigned height = msg[7];
	unsigned dpb_size = msg[9];
	unsigned pitch = msg[28];
	unsigned level = msg[57];

	unsigned width_in_mb = width / 16;
	unsigned height_in_mb = ALIGN(height / 16, 2);
	unsigned fs_in_mb = width_in_mb * height_in_mb;

	unsigned image_size, tmp, min_dpb_size, num_dpb_buffer;
	unsigned min_ctx_size = 0;

	image_size = width * height;
	image_size += image_size / 2;
	image_size = ALIGN(image_size, 1024);

	switch (stream_type) {
	case 0: /* H264 */
	case 7: /* H264 Perf */
		switch (level) {
		case 30:
			num_dpb_buffer = 8100 / fs_in_mb;
			break;
		case 31:
			num_dpb_buffer = 18000 / fs_in_mb;
			break;
		case 32:
			num_dpb_buffer = 20480 / fs_in_mb;
			break;
		case 41:
			num_dpb_buffer = 32768 / fs_in_mb;
			break;
		case 42:
			num_dpb_buffer = 34816 / fs_in_mb;
			break;
		case 50:
			num_dpb_buffer = 110400 / fs_in_mb;
			break;
		case 51:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		default:
			num_dpb_buffer = 184320 / fs_in_mb;
			break;
		}
		num_dpb_buffer++;
		if (num_dpb_buffer > 17)
			num_dpb_buffer = 17;

		/* reference picture buffer */
		min_dpb_size = image_size * num_dpb_buffer;

		/* macroblock context buffer */
		min_dpb_size += width_in_mb * height_in_mb * num_dpb_buffer * 192;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * height_in_mb * 32;
		break;

	case 1: /* VC1 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CONTEXT_BUFFER */
		min_dpb_size += width_in_mb * height_in_mb * 128;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * 64;

		/* DB surface buffer */
		min_dpb_size += width_in_mb * 128;

		/* BP */
		tmp = max(width_in_mb, height_in_mb);
		min_dpb_size += ALIGN(tmp * 7 * 16, 64);
		break;

	case 3: /* MPEG2 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;
		break;

	case 4: /* MPEG4 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CM */
		min_dpb_size += width_in_mb * height_in_mb * 64;

		/* IT surface buffer */
		min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
		break;

	case 16: /* H265 */
		image_size = (ALIGN(width, 16) * ALIGN(height, 16) * 3) / 2;
		image_size = ALIGN(image_size, 256);

		num_dpb_buffer = (le32_to_cpu(msg[59]) & 0xff) + 2;
		min_dpb_size = image_size * num_dpb_buffer;
		min_ctx_size = ((width + 255) / 16) * ((height + 255) / 16)
			       * 16 * num_dpb_buffer + 52 * 1024;
		break;

	default:
		DRM_ERROR("UVD codec not handled %d!\n", stream_type);
		return -EINVAL;
	}

	if (width > pitch) {
		DRM_ERROR("Invalid UVD decoding target pitch!\n");
		return -EINVAL;
	}

	if (dpb_size < min_dpb_size) {
		DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n",
			  dpb_size, min_dpb_size);
		return -EINVAL;
	}

	buf_sizes[0x1] = dpb_size;
	buf_sizes[0x2] = image_size;
	buf_sizes[0x4] = min_ctx_size;
	return 0;
}

/**
 * amdgpu_uvd_cs_msg - handle UVD message
 *
 * @ctx: UVD parser context
 * @bo: buffer object containing the message
 * @offset: offset into the buffer object
 *
 * Peek into the UVD message and extract the session id.
 * Make sure that we don't open up too many sessions.
 */
static int amdgpu_uvd_cs_msg(struct amdgpu_uvd_cs_ctx *ctx,
			     struct amdgpu_bo *bo, unsigned offset)
{
	struct amdgpu_device *adev = ctx->parser->adev;
	int32_t *msg, msg_type, handle;
	struct fence *f;
	void *ptr;
	int i, r;

	if (offset & 0x3F) {
		DRM_ERROR("UVD messages must be 64 byte aligned!\n");
		return -EINVAL;
	}

	f = reservation_object_get_excl(bo->tbo.resv);
	if (f) {
		r = amdgpu_fence_wait((struct amdgpu_fence *)f, false);
		if (r) {
			DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
			return r;
		}
	}

	r = amdgpu_bo_kmap(bo, &ptr);
	if (r) {
		DRM_ERROR("Failed mapping the UVD message (%d)!\n", r);
		return r;
	}

	msg = ptr + offset;

	msg_type = msg[1];
	handle = msg[2];

	if (handle == 0) {
		amdgpu_bo_kunmap(bo);
		DRM_ERROR("Invalid UVD handle!\n");
		return -EINVAL;
	}

	if (msg_type == 1) {
		/* it's a decode msg, calc buffer sizes */
		r = amdgpu_uvd_cs_msg_decode(msg, ctx->buf_sizes);
		amdgpu_bo_kunmap(bo);
		if (r)
			return r;

	} else if (msg_type == 2) {
		/* it's a destroy msg, free the handle */
		for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
			atomic_cmpxchg(&adev->uvd.handles[i], handle, 0);
		amdgpu_bo_kunmap(bo);
		return 0;
	} else {
		/* it's a create msg */
		amdgpu_bo_kunmap(bo);

		if (msg_type != 0) {
			DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
			return -EINVAL;
		}

		/* it's a create msg, no special handling needed */
	}

	/* create or decode, validate the handle */
	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
		if (atomic_read(&adev->uvd.handles[i]) == handle)
			return 0;
	}

	/* handle not found, try to alloc a new one */
	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i) {
		if (!atomic_cmpxchg(&adev->uvd.handles[i], 0, handle)) {
			adev->uvd.filp[i] = ctx->parser->filp;
			return 0;
		}
	}

	DRM_ERROR("No more free UVD handles!\n");
	return -EINVAL;
}

/**
 * amdgpu_uvd_cs_pass2 - second parsing round
 *
 * @ctx: UVD parser context
 *
 * Patch buffer addresses, make sure buffer sizes are correct.
 */
static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
{
	struct amdgpu_bo_va_mapping *mapping;
	struct amdgpu_bo *bo;
	struct amdgpu_ib *ib;
	uint32_t cmd, lo, hi;
	uint64_t start, end;
	uint64_t addr;
	int r;

	lo = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data0);
	hi = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->data1);
	addr = ((uint64_t)lo) | (((uint64_t)hi) << 32);

	mapping = amdgpu_cs_find_mapping(ctx->parser, addr, &bo);
	if (mapping == NULL)
		return -EINVAL;

	start = amdgpu_bo_gpu_offset(bo);

	end = (mapping->it.last + 1 - mapping->it.start);
	end = end * AMDGPU_GPU_PAGE_SIZE + start;

	addr -= ((uint64_t)mapping->it.start) * AMDGPU_GPU_PAGE_SIZE;
	start += addr;

	ib = &ctx->parser->ibs[ctx->ib_idx];
	ib->ptr[ctx->data0] = start & 0xFFFFFFFF;
	ib->ptr[ctx->data1] = start >> 32;

	cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx) >> 1;
	if (cmd < 0x4) {
		if ((end - start) < ctx->buf_sizes[cmd]) {
			DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
				  (unsigned)(end - start),
				  ctx->buf_sizes[cmd]);
			return -EINVAL;
		}

	} else if (cmd == 0x206) {
		if ((end - start) < ctx->buf_sizes[4]) {
			DRM_ERROR("buffer (%d) too small (%d / %d)!\n", cmd,
				  (unsigned)(end - start),
				  ctx->buf_sizes[4]);
			return -EINVAL;
		}
	} else if ((cmd != 0x100) && (cmd != 0x204)) {
		DRM_ERROR("invalid UVD command %X!\n", cmd);
		return -EINVAL;
	}

	if (!ctx->parser->adev->uvd.address_64_bit) {
		if ((start >> 28) != ((end - 1) >> 28)) {
			DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
				  start, end);
			return -EINVAL;
		}

		if ((cmd == 0 || cmd == 0x3) &&
		    (start >> 28) != (ctx->parser->adev->uvd.gpu_addr >> 28)) {
			DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
				  start, end);
			return -EINVAL;
		}
	}

	if (cmd == 0) {
		ctx->has_msg_cmd = true;
		r = amdgpu_uvd_cs_msg(ctx, bo, addr);
		if (r)
			return r;
	} else if (!ctx->has_msg_cmd) {
		DRM_ERROR("Message needed before other commands are sent!\n");
		return -EINVAL;
	}

	return 0;
}

/**
 * amdgpu_uvd_cs_reg - parse register writes
 *
 * @ctx: UVD parser context
 * @cb: callback function
 *
 * Parse the register writes, call cb on each complete command.
 */
static int amdgpu_uvd_cs_reg(struct amdgpu_uvd_cs_ctx *ctx,
			     int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
{
	struct amdgpu_ib *ib = &ctx->parser->ibs[ctx->ib_idx];
	int i, r;

	ctx->idx++;
	for (i = 0; i <= ctx->count; ++i) {
		unsigned reg = ctx->reg + i;

		if (ctx->idx >= ib->length_dw) {
			DRM_ERROR("Register command after end of CS!\n");
			return -EINVAL;
		}

		switch (reg) {
		case mmUVD_GPCOM_VCPU_DATA0:
			ctx->data0 = ctx->idx;
			break;
		case mmUVD_GPCOM_VCPU_DATA1:
			ctx->data1 = ctx->idx;
			break;
		case mmUVD_GPCOM_VCPU_CMD:
			r = cb(ctx);
			if (r)
				return r;
			break;
		case mmUVD_ENGINE_CNTL:
			break;
		default:
			DRM_ERROR("Invalid reg 0x%X!\n", reg);
			return -EINVAL;
		}
		ctx->idx++;
	}
	return 0;
}

/**
 * amdgpu_uvd_cs_packets - parse UVD packets
 *
 * @ctx: UVD parser context
 * @cb: callback function
 *
 * Parse the command stream packets.
 */
static int amdgpu_uvd_cs_packets(struct amdgpu_uvd_cs_ctx *ctx,
				 int (*cb)(struct amdgpu_uvd_cs_ctx *ctx))
{
	struct amdgpu_ib *ib = &ctx->parser->ibs[ctx->ib_idx];
	int r;

	for (ctx->idx = 0; ctx->idx < ib->length_dw; ) {
		uint32_t cmd = amdgpu_get_ib_value(ctx->parser, ctx->ib_idx, ctx->idx);
		unsigned type = CP_PACKET_GET_TYPE(cmd);
		switch (type) {
		case PACKET_TYPE0:
			ctx->reg = CP_PACKET0_GET_REG(cmd);
			ctx->count = CP_PACKET_GET_COUNT(cmd);
			r = amdgpu_uvd_cs_reg(ctx, cb);
			if (r)
				return r;
			break;
		case PACKET_TYPE2:
			++ctx->idx;
			break;
		default:
			DRM_ERROR("Unknown packet type %d!\n", type);
			return -EINVAL;
		}
	}
	return 0;
}

/**
 * amdgpu_uvd_ring_parse_cs - UVD command submission parser
 *
 * @parser: Command submission parser context
 * @ib_idx: index of the indirect buffer to parse
 *
 * Parse the command stream, patch in addresses as necessary.
 */
int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
{
	struct amdgpu_uvd_cs_ctx ctx = {};
	unsigned buf_sizes[] = {
		[0x00000000] = 2048,
		[0x00000001] = 0xFFFFFFFF,
		[0x00000002] = 0xFFFFFFFF,
		[0x00000003] = 2048,
		[0x00000004] = 0xFFFFFFFF,
	};
	struct amdgpu_ib *ib = &parser->ibs[ib_idx];
	int r;

	if (ib->length_dw % 16) {
		DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
			  ib->length_dw);
		return -EINVAL;
	}

	ctx.parser = parser;
	ctx.buf_sizes = buf_sizes;
	ctx.ib_idx = ib_idx;

	/* first round, make sure the buffers are actually in the UVD segment */
	r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass1);
	if (r)
		return r;

	/* second round, patch buffer addresses into the command stream */
	r = amdgpu_uvd_cs_packets(&ctx, amdgpu_uvd_cs_pass2);
	if (r)
		return r;

	if (!ctx.has_msg_cmd) {
		DRM_ERROR("UVD-IBs need a msg command!\n");
		return -EINVAL;
	}

	amdgpu_uvd_note_usage(ctx.parser->adev);

	return 0;
}

static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring,
			       struct amdgpu_bo *bo,
			       struct amdgpu_fence **fence)
{
	struct ttm_validate_buffer tv;
	struct ww_acquire_ctx ticket;
	struct list_head head;
	struct amdgpu_ib ib;
	uint64_t addr;
	int i, r;

	memset(&tv, 0, sizeof(tv));
	tv.bo = &bo->tbo;

	INIT_LIST_HEAD(&head);
	list_add(&tv.head, &head);

	r = ttm_eu_reserve_buffers(&ticket, &head, true, NULL);
	if (r)
		return r;

	if (!bo->adev->uvd.address_64_bit) {
		amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
		amdgpu_uvd_force_into_uvd_segment(bo);
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
	if (r)
		goto err;

	r = amdgpu_ib_get(ring, NULL, 64, &ib);
	if (r)
		goto err;

	addr = amdgpu_bo_gpu_offset(bo);
	ib.ptr[0] = PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0);
	ib.ptr[1] = addr;
	ib.ptr[2] = PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0);
	ib.ptr[3] = addr >> 32;
	ib.ptr[4] = PACKET0(mmUVD_GPCOM_VCPU_CMD, 0);
	ib.ptr[5] = 0;
	for (i = 6; i < 16; ++i)
		ib.ptr[i] = PACKET2(0);
	ib.length_dw = 16;

	r = amdgpu_ib_schedule(ring->adev, 1, &ib, AMDGPU_FENCE_OWNER_UNDEFINED);
	if (r)
		goto err_free;

	ttm_eu_fence_buffer_objects(&ticket, &head, &ib.fence->base);

	if (fence)
		*fence = amdgpu_fence_ref(ib.fence);

	amdgpu_ib_free(ring->adev, &ib);
	amdgpu_bo_unref(&bo);
	return 0;

err_free:
	/* don't leak the IB if scheduling failed */
	amdgpu_ib_free(ring->adev, &ib);

err:
	ttm_eu_backoff_reservation(&ticket, &head);
	return r;
}

/*
 * Multiple fence commands without any stream commands in between can
 * crash the vcpu so just try to emit a dummy create/destroy msg to
 * avoid this.
 */
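/**
 * amdgpu_uvd_get_create_msg - emit a dummy session create message
 *
 * @ring: UVD ring to submit on
 * @handle: session handle to create
 * @fence: optionally returned fence for the submission
 *
 * Write a minimal create message into a temporary BO and send it
 * down the ring.
 */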
int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
			      struct amdgpu_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &bo);
	if (r)
		return r;

	r = amdgpu_bo_reserve(bo, false);
	if (r) {
		amdgpu_bo_unref(&bo);
		return r;
	}

	r = amdgpu_bo_kmap(bo, (void **)&msg);
	if (r) {
		amdgpu_bo_unreserve(bo);
		amdgpu_bo_unref(&bo);
		return r;
	}

	/* stitch together a UVD create msg */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000000);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	msg[4] = cpu_to_le32(0x00000000);
	msg[5] = cpu_to_le32(0x00000000);
	msg[6] = cpu_to_le32(0x00000000);
	msg[7] = cpu_to_le32(0x00000780);
	msg[8] = cpu_to_le32(0x00000440);
	msg[9] = cpu_to_le32(0x00000000);
	msg[10] = cpu_to_le32(0x01b37000);
	for (i = 11; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unreserve(bo);

	return amdgpu_uvd_send_msg(ring, bo, fence);
}

int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
			       struct amdgpu_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_bo *bo;
	uint32_t *msg;
	int r, i;

	r = amdgpu_bo_create(adev, 1024, PAGE_SIZE, true,
			     AMDGPU_GEM_DOMAIN_VRAM, 0, NULL, &bo);
	if (r)
		return r;

	r = amdgpu_bo_reserve(bo, false);
	if (r) {
		amdgpu_bo_unref(&bo);
		return r;
	}

	r = amdgpu_bo_kmap(bo, (void **)&msg);
	if (r) {
		amdgpu_bo_unreserve(bo);
		amdgpu_bo_unref(&bo);
		return r;
	}

	/* stitch together a UVD destroy msg */
	msg[0] = cpu_to_le32(0x00000de4);
	msg[1] = cpu_to_le32(0x00000002);
	msg[2] = cpu_to_le32(handle);
	msg[3] = cpu_to_le32(0x00000000);
	for (i = 4; i < 1024; ++i)
		msg[i] = cpu_to_le32(0x0);

	amdgpu_bo_kunmap(bo);
	amdgpu_bo_unreserve(bo);

	return amdgpu_uvd_send_msg(ring, bo, fence);
}

static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
{
	struct amdgpu_device *adev =
		container_of(work, struct amdgpu_device, uvd.idle_work.work);
	unsigned i, fences, handles = 0;

	fences = amdgpu_fence_count_emitted(&adev->uvd.ring);

	for (i = 0; i < AMDGPU_MAX_UVD_HANDLES; ++i)
		if (atomic_read(&adev->uvd.handles[i]))
			++handles;

	if (fences == 0 && handles == 0) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_uvd(adev, false);
		} else {
			amdgpu_asic_set_uvd_clocks(adev, 0, 0);
		}
	} else {
		schedule_delayed_work(&adev->uvd.idle_work,
				      msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
	}
}

static void amdgpu_uvd_note_usage(struct amdgpu_device *adev)
{
	bool set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
	set_clocks &= schedule_delayed_work(&adev->uvd.idle_work,
					    msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));

	if (set_clocks) {
		if (adev->pm.dpm_enabled) {
			amdgpu_dpm_enable_uvd(adev, true);
		} else {
			amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);
		}
	}
}