/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *    Christian König <deathsimple@vodafone.de>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "radeon.h"
#include "r600d.h"

/* 1 second timeout */
#define UVD_IDLE_TIMEOUT_MS	1000

/* Firmware Names */
#define FIRMWARE_RV710		"radeon/RV710_uvd.bin"
#define FIRMWARE_CYPRESS	"radeon/CYPRESS_uvd.bin"
#define FIRMWARE_SUMO		"radeon/SUMO_uvd.bin"
#define FIRMWARE_TAHITI		"radeon/TAHITI_uvd.bin"

MODULE_FIRMWARE(FIRMWARE_RV710);
MODULE_FIRMWARE(FIRMWARE_CYPRESS);
MODULE_FIRMWARE(FIRMWARE_SUMO);
MODULE_FIRMWARE(FIRMWARE_TAHITI);

static void radeon_uvd_idle_work_handler(struct work_struct *work);
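
/*
 * radeon_uvd_init - load UVD firmware and allocate the VCPU buffer
 *
 * Loads the UVD microcode for the detected ASIC family via
 * request_firmware() and allocates a single VRAM BO large enough for the
 * firmware image (plus 8 bytes), the UVD stack and the UVD heap.
 */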
int radeon_uvd_init(struct radeon_device *rdev)
{
	struct platform_device *pdev;
	unsigned long bo_size;
	const char *fw_name;
	int i, r;

	INIT_DELAYED_WORK(&rdev->uvd.idle_work, radeon_uvd_idle_work_handler);

	pdev = platform_device_register_simple("radeon_uvd", 0, NULL, 0);
	r = IS_ERR(pdev);
	if (r) {
		dev_err(rdev->dev, "radeon_uvd: Failed to register firmware\n");
		return -EINVAL;
	}

	switch (rdev->family) {
	case CHIP_RV710:
	case CHIP_RV730:
	case CHIP_RV740:
		fw_name = FIRMWARE_RV710;
		break;

	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
	case CHIP_JUNIPER:
	case CHIP_REDWOOD:
	case CHIP_CEDAR:
		fw_name = FIRMWARE_CYPRESS;
		break;

	case CHIP_SUMO:
	case CHIP_SUMO2:
	case CHIP_PALM:
	case CHIP_CAYMAN:
	case CHIP_BARTS:
	case CHIP_TURKS:
	case CHIP_CAICOS:
		fw_name = FIRMWARE_SUMO;
		break;

	case CHIP_TAHITI:
	case CHIP_VERDE:
	case CHIP_PITCAIRN:
	case CHIP_ARUBA:
		fw_name = FIRMWARE_TAHITI;
		break;

	default:
		return -EINVAL;
	}

	r = request_firmware(&rdev->uvd_fw, fw_name, &pdev->dev);
	if (r) {
		dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n",
			fw_name);
		platform_device_unregister(pdev);
		return r;
	}

	platform_device_unregister(pdev);

	/* firmware image plus 8 bytes, then the UVD stack and heap */
	bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) +
		  RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE;
	r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->uvd.vcpu_bo);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to allocate UVD bo\n", r);
		return r;
	}

	r = radeon_uvd_resume(rdev);
	if (r)
		return r;

	memset(rdev->uvd.cpu_addr, 0, bo_size);
	memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);

	r = radeon_uvd_suspend(rdev);
	if (r)
		return r;

	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
		atomic_set(&rdev->uvd.handles[i], 0);
		rdev->uvd.filp[i] = NULL;
	}

	return 0;
}
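
/*
 * radeon_uvd_fini - unpin and free the UVD VCPU buffer object
 */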
void radeon_uvd_fini(struct radeon_device *rdev)
{
	radeon_uvd_suspend(rdev);
	radeon_bo_unref(&rdev->uvd.vcpu_bo);
}
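
/*
 * radeon_uvd_suspend - kunmap and unpin the VCPU buffer object before a
 * suspend or teardown; radeon_uvd_resume() pins and maps it again.
 */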
int radeon_uvd_suspend(struct radeon_device *rdev)
{
	int r;

	if (rdev->uvd.vcpu_bo == NULL)
		return 0;

	r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
	if (!r) {
		radeon_bo_kunmap(rdev->uvd.vcpu_bo);
		radeon_bo_unpin(rdev->uvd.vcpu_bo);
		radeon_bo_unreserve(rdev->uvd.vcpu_bo);
	}

	return r;
}
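
/*
 * radeon_uvd_resume - pin the VCPU buffer object into VRAM and map it
 * for CPU access, dropping the BO on any failure.
 */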
int radeon_uvd_resume(struct radeon_device *rdev)
{
	int r;

	if (rdev->uvd.vcpu_bo == NULL)
		return -EINVAL;

	r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
	if (r) {
		radeon_bo_unref(&rdev->uvd.vcpu_bo);
		dev_err(rdev->dev, "(%d) failed to reserve UVD bo\n", r);
		return r;
	}

	r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
			  &rdev->uvd.gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->uvd.vcpu_bo);
		radeon_bo_unref(&rdev->uvd.vcpu_bo);
		dev_err(rdev->dev, "(%d) UVD bo pin failed\n", r);
		return r;
	}

	r = radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
	if (r) {
		dev_err(rdev->dev, "(%d) UVD map failed\n", r);
		return r;
	}

	radeon_bo_unreserve(rdev->uvd.vcpu_bo);

	return 0;
}
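
/*
 * radeon_uvd_force_into_uvd_segment - limit a BO to the UVD segment
 *
 * UVD can only address buffers in the first 256MB of VRAM, so clamp the
 * allowed placement range accordingly.
 */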
void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo)
{
	rbo->placement.fpfn = 0 >> PAGE_SHIFT;
	rbo->placement.lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
}
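
/*
 * radeon_uvd_free_handles - clean up the sessions of a closing file handle
 *
 * Emits a destroy message for every handle still owned by @filp and
 * waits for the resulting fences before releasing the handle slots.
 */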
void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp)
{
	int i, r;

	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
		if (rdev->uvd.filp[i] == filp) {
			uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
			struct radeon_fence *fence;

			r = radeon_uvd_get_destroy_msg(rdev,
				R600_RING_TYPE_UVD_INDEX, handle, &fence);
			if (r) {
				DRM_ERROR("Error destroying UVD (%d)!\n", r);
				continue;
			}

			radeon_fence_wait(fence, false);
			radeon_fence_unref(&fence);

			rdev->uvd.filp[i] = NULL;
			atomic_set(&rdev->uvd.handles[i], 0);
		}
	}
}
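
/*
 * radeon_uvd_cs_msg_decode - sanity-check a decode message
 *
 * Computes the minimum decoded picture buffer (DPB) size the codec needs
 * and rejects messages that advertise less. As a worked example: a
 * 1920x1088 H264 stream gives image_size = ALIGN(1920*1088*3/2, 1024) =
 * 3133440 bytes, so min_dpb_size = 17*3133440 + 120*68*17*192 +
 * 120*68*32 = 80163840 bytes (roughly 76 MB).
 */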
static int radeon_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
{
	unsigned stream_type = msg[4];
	unsigned width = msg[6];
	unsigned height = msg[7];
	unsigned dpb_size = msg[9];
	unsigned pitch = msg[28];

	unsigned width_in_mb = width / 16;
	unsigned height_in_mb = ALIGN(height / 16, 2);

	unsigned image_size, tmp, min_dpb_size;

	image_size = width * height;
	image_size += image_size / 2;
	image_size = ALIGN(image_size, 1024);

	switch (stream_type) {
	case 0: /* H264 */

		/* reference picture buffer */
		min_dpb_size = image_size * 17;

		/* macroblock context buffer */
		min_dpb_size += width_in_mb * height_in_mb * 17 * 192;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * height_in_mb * 32;
		break;

	case 1: /* VC1 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CONTEXT_BUFFER */
		min_dpb_size += width_in_mb * height_in_mb * 128;

		/* IT surface buffer */
		min_dpb_size += width_in_mb * 64;

		/* DB surface buffer */
		min_dpb_size += width_in_mb * 128;

		/* BP */
		tmp = max(width_in_mb, height_in_mb);
		min_dpb_size += ALIGN(tmp * 7 * 16, 64);
		break;

	case 3: /* MPEG2 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;
		break;

	case 4: /* MPEG4 */

		/* reference picture buffer */
		min_dpb_size = image_size * 3;

		/* CM */
		min_dpb_size += width_in_mb * height_in_mb * 64;

		/* IT surface buffer */
		min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
		break;

	default:
		DRM_ERROR("UVD codec not handled %d!\n", stream_type);
		return -EINVAL;
	}

	if (width > pitch) {
		DRM_ERROR("Invalid UVD decoding target pitch!\n");
		return -EINVAL;
	}

	if (dpb_size < min_dpb_size) {
		DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n",
			  dpb_size, min_dpb_size);
		return -EINVAL;
	}

	buf_sizes[0x1] = dpb_size;
	buf_sizes[0x2] = image_size;
	return 0;
}
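
/*
 * radeon_uvd_cs_msg - handle a UVD message from userspace
 *
 * Messages must be 64 byte aligned. Type 1 (decode) messages have their
 * buffer sizes validated, type 2 (destroy) messages release the session
 * handle, and anything else is treated as a create message. For create
 * and decode the handle is then looked up or newly allocated.
 */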
static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
			     unsigned offset, unsigned buf_sizes[])
{
	int32_t *msg, msg_type, handle;
	void *ptr;

	int i, r;

	if (offset & 0x3F) {
		DRM_ERROR("UVD messages must be 64 byte aligned!\n");
		return -EINVAL;
	}

	r = radeon_bo_kmap(bo, &ptr);
	if (r)
		return r;

	msg = ptr + offset;

	msg_type = msg[1];
	handle = msg[2];

	if (handle == 0) {
		DRM_ERROR("Invalid UVD handle!\n");
		return -EINVAL;
	}

	if (msg_type == 1) {
		/* it's a decode msg, calc buffer sizes */
		r = radeon_uvd_cs_msg_decode(msg, buf_sizes);
		radeon_bo_kunmap(bo);
		if (r)
			return r;

	} else if (msg_type == 2) {
		/* it's a destroy msg, free the handle */
		for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
			atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0);
		radeon_bo_kunmap(bo);
		return 0;
	} else {
		/* it's a create msg, no special handling needed */
		radeon_bo_kunmap(bo);
	}

	/* create or decode, validate the handle */
	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
		if (atomic_read(&p->rdev->uvd.handles[i]) == handle)
			return 0;
	}

	/* handle not found try to alloc a new one */
	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
		if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
			p->rdev->uvd.filp[i] = p->filp;
			return 0;
		}
	}

	DRM_ERROR("No more free UVD handles!\n");
	return -EINVAL;
}
static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
			       int data0, int data1,
			       unsigned buf_sizes[])
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_reloc *reloc;
	unsigned idx, cmd, offset;
	uint64_t start, end;
	int r;

	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	offset = radeon_get_ib_value(p, data0);
	idx = radeon_get_ib_value(p, data1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		return -EINVAL;
	}

	reloc = p->relocs_ptr[(idx / 4)];
	start = reloc->lobj.gpu_offset;
	end = start + radeon_bo_size(reloc->robj);
	start += offset;

	p->ib.ptr[data0] = start & 0xFFFFFFFF;
	p->ib.ptr[data1] = start >> 32;

	cmd = radeon_get_ib_value(p, p->idx) >> 1;

	if (cmd < 0x4) {
		if ((end - start) < buf_sizes[cmd]) {
			DRM_ERROR("buffer to small (%d / %d)!\n",
				  (unsigned)(end - start), buf_sizes[cmd]);
			return -EINVAL;
		}

	} else if (cmd != 0x100) {
		DRM_ERROR("invalid UVD command %X!\n", cmd);
		return -EINVAL;
	}

	if ((start >> 28) != (end >> 28)) {
		DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
			  start, end);
		return -EINVAL;
	}

	/* TODO: is this still necessary on NI+ ? */
	if ((cmd == 0 || cmd == 0x3) &&
	    (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
		DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
			  start, end);
		return -EINVAL;
	}

	if (cmd == 0) {
		r = radeon_uvd_cs_msg(p, reloc->robj, offset, buf_sizes);
		if (r)
			return r;
	}

	return 0;
}
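
/*
 * radeon_uvd_cs_reg - parse the register writes of a type 0 packet
 *
 * Only the UVD_GPCOM_VCPU_* registers and UVD_ENGINE_CNTL are allowed;
 * a write to UVD_GPCOM_VCPU_CMD triggers relocation handling for the
 * previously seen DATA0/DATA1 values.
 */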
static int radeon_uvd_cs_reg(struct radeon_cs_parser *p,
			     struct radeon_cs_packet *pkt,
			     int *data0, int *data1,
			     unsigned buf_sizes[])
{
	int i, r;

	p->idx++;
	for (i = 0; i <= pkt->count; ++i) {
		switch (pkt->reg + i*4) {
		case UVD_GPCOM_VCPU_DATA0:
			*data0 = p->idx;
			break;
		case UVD_GPCOM_VCPU_DATA1:
			*data1 = p->idx;
			break;
		case UVD_GPCOM_VCPU_CMD:
			r = radeon_uvd_cs_reloc(p, *data0, *data1, buf_sizes);
			if (r)
				return r;
			break;
		case UVD_ENGINE_CNTL:
			break;
		default:
			DRM_ERROR("Invalid reg 0x%X!\n",
				  pkt->reg + i*4);
			return -EINVAL;
		}
		p->idx++;
	}

	return 0;
}
int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	int r, data0 = 0, data1 = 0;

	/* minimum buffer sizes */
	unsigned buf_sizes[] = {
		[0x00000000] = 2048,
		[0x00000001] = 32 * 1024 * 1024,
		[0x00000002] = 2048 * 1152 * 3,
		[0x00000003] = 2048,
	};

	if (p->chunks[p->chunk_ib_idx].length_dw % 16) {
		DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
			  p->chunks[p->chunk_ib_idx].length_dw);
		return -EINVAL;
	}

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}

	do {
		r = radeon_cs_packet_parse(p, &pkt, p->idx);
		if (r)
			return r;
		switch (pkt.type) {
		case RADEON_PACKET_TYPE0:
			r = radeon_uvd_cs_reg(p, &pkt, &data0,
					      &data1, buf_sizes);
			if (r)
				return r;
			break;
		case RADEON_PACKET_TYPE2:
			p->idx += pkt.count + 2;
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
			return -EINVAL;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);

	return 0;
}
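
/*
 * radeon_uvd_send_msg - submit a message buffer to the UVD ring
 *
 * Pins the message BO into the UVD segment and emits a minimal 16 dword
 * IB that writes the buffer address to UVD_GPCOM_VCPU_DATA0/DATA1 and
 * kicks the VCPU via UVD_GPCOM_VCPU_CMD, padding the rest with NOPs.
 */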
static int radeon_uvd_send_msg(struct radeon_device *rdev,
			       int ring, struct radeon_bo *bo,
			       struct radeon_fence **fence)
{
	struct ttm_validate_buffer tv;
	struct list_head head;
	struct radeon_ib ib;
	uint64_t addr;
	int i, r;

	memset(&tv, 0, sizeof(tv));
	tv.bo = &bo->tbo;

	INIT_LIST_HEAD(&head);
	list_add(&tv.head, &head);

	r = ttm_eu_reserve_buffers(&head);
	if (r)
		return r;

	radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_VRAM);
	radeon_uvd_force_into_uvd_segment(bo);

	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
	if (r) {
		ttm_eu_backoff_reservation(&head);
		return r;
	}

	r = radeon_ib_get(rdev, ring, &ib, NULL, 16);
	if (r) {
		ttm_eu_backoff_reservation(&head);
		return r;
	}

	addr = radeon_bo_gpu_offset(bo);
	ib.ptr[0] = PACKET0(UVD_GPCOM_VCPU_DATA0, 0);
	ib.ptr[1] = addr;
	ib.ptr[2] = PACKET0(UVD_GPCOM_VCPU_DATA1, 0);
	ib.ptr[3] = addr >> 32;
	ib.ptr[4] = PACKET0(UVD_GPCOM_VCPU_CMD, 0);
	ib.ptr[5] = 0;
	for (i = 6; i < 16; ++i)
		ib.ptr[i] = PACKET2(0);
	ib.length_dw = 16;

	r = radeon_ib_schedule(rdev, &ib, NULL);
	if (r) {
		ttm_eu_backoff_reservation(&head);
		return r;
	}
	ttm_eu_fence_buffer_objects(&head, ib.fence);

	if (fence)
		*fence = radeon_fence_ref(ib.fence);

	radeon_ib_free(rdev, &ib);
	radeon_bo_unref(&bo);
	return 0;
}

/* multiple fence commands without any stream commands in between can
   crash the vcpu so just try to emit a dummy create/destroy msg to
   avoid this */
int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
			      uint32_t handle, struct radeon_fence **fence)
{
	struct radeon_bo *bo;
	uint32_t *msg;
	int r, i;

	r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM, NULL, &bo);
	if (r)
		return r;

	r = radeon_bo_reserve(bo, false);
	if (r) {
		radeon_bo_unref(&bo);
		return r;
	}

	r = radeon_bo_kmap(bo, (void **)&msg);
	if (r) {
		radeon_bo_unreserve(bo);
		radeon_bo_unref(&bo);
		return r;
	}

	/* stitch together an UVD create msg */
	msg[0] = 0x00000de4;
	msg[1] = 0x00000000;
	msg[2] = handle;
	msg[3] = 0x00000000;
	msg[4] = 0x00000000;
	msg[5] = 0x00000000;
	msg[6] = 0x00000000;
	msg[7] = 0x00000780;
	msg[8] = 0x00000440;
	msg[9] = 0x00000000;
	msg[10] = 0x01b37000;
	for (i = 11; i < 1024; ++i)
		msg[i] = 0x0;

	radeon_bo_kunmap(bo);
	radeon_bo_unreserve(bo);

	return radeon_uvd_send_msg(rdev, ring, bo, fence);
}

int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
			       uint32_t handle, struct radeon_fence **fence)
{
	struct radeon_bo *bo;
	uint32_t *msg;
	int r, i;

	r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true,
			     RADEON_GEM_DOMAIN_VRAM, NULL, &bo);
	if (r)
		return r;

	r = radeon_bo_reserve(bo, false);
	if (r) {
		radeon_bo_unref(&bo);
		return r;
	}

	r = radeon_bo_kmap(bo, (void **)&msg);
	if (r) {
		radeon_bo_unreserve(bo);
		radeon_bo_unref(&bo);
		return r;
	}

	/* stitch together an UVD destroy msg */
	msg[0] = 0x00000de4;
	msg[1] = 0x00000002;
	msg[2] = handle;
	msg[3] = 0x00000000;
	for (i = 4; i < 1024; ++i)
		msg[i] = 0x0;

	radeon_bo_kunmap(bo);
	radeon_bo_unreserve(bo);

	return radeon_uvd_send_msg(rdev, ring, bo, fence);
}
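
/*
 * radeon_uvd_idle_work_handler - delayed work to power down UVD clocks
 *
 * If no UVD fences are outstanding after the idle timeout the UVD clocks
 * are switched off, otherwise the work item re-arms itself.
 */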
static void radeon_uvd_idle_work_handler(struct work_struct *work)
{
	struct radeon_device *rdev =
		container_of(work, struct radeon_device, uvd.idle_work.work);

	if (radeon_fence_count_emitted(rdev, R600_RING_TYPE_UVD_INDEX) == 0)
		radeon_set_uvd_clocks(rdev, 0, 0);
	else
		schedule_delayed_work(&rdev->uvd.idle_work,
				      msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
}
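
/*
 * radeon_uvd_note_usage - mark the UVD block as busy
 *
 * Cancels the pending idle work and re-arms it; if the work was not
 * already pending this is the first use in a while, so the UVD clocks
 * are brought back up.
 */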
void radeon_uvd_note_usage(struct radeon_device *rdev)
{
	bool set_clocks = !cancel_delayed_work_sync(&rdev->uvd.idle_work);
	set_clocks &= schedule_delayed_work(&rdev->uvd.idle_work,
					    msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
	if (set_clocks)
		radeon_set_uvd_clocks(rdev, 53300, 40000);
}
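
/*
 * radeon_uvd_calc_upll_post_div - pick a post divider for one clock
 *
 * Starts from vco_freq / target_freq and then rounds up until the
 * resulting frequency is at or below the target and the divider meets
 * the minimum and evenness constraints.
 */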
static unsigned radeon_uvd_calc_upll_post_div(unsigned vco_freq,
					      unsigned target_freq,
					      unsigned pd_min,
					      unsigned pd_even)
{
	unsigned post_div = vco_freq / target_freq;

	/* adjust to post divider minimum value */
	if (post_div < pd_min)
		post_div = pd_min;

	/* we always need a frequency less than or equal the target */
	if ((vco_freq / post_div) > target_freq)
		post_div += 1;

	/* post dividers above a certain value must be even */
	if (post_div > pd_even && post_div % 2)
		post_div += 1;

	return post_div;
}

/**
 * radeon_uvd_calc_upll_dividers - calc UPLL clock dividers
 *
 * @rdev: radeon_device pointer
 * @vclk: wanted VCLK
 * @dclk: wanted DCLK
 * @vco_min: minimum VCO frequency
 * @vco_max: maximum VCO frequency
 * @fb_factor: factor to multiply vco freq with
 * @fb_mask: limit and bitmask for feedback divider
 * @pd_min: post divider minimum
 * @pd_max: post divider maximum
 * @pd_even: post divider must be even above this value
 * @optimal_fb_div: resulting feedback divider
 * @optimal_vclk_div: resulting vclk post divider
 * @optimal_dclk_div: resulting dclk post divider
 *
 * Calculate dividers for UVDs UPLL (R6xx-SI, except APUs).
 * Returns zero on success, -EINVAL on error.
 */
int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
				  unsigned vclk, unsigned dclk,
				  unsigned vco_min, unsigned vco_max,
				  unsigned fb_factor, unsigned fb_mask,
				  unsigned pd_min, unsigned pd_max,
				  unsigned pd_even,
				  unsigned *optimal_fb_div,
				  unsigned *optimal_vclk_div,
				  unsigned *optimal_dclk_div)
{
	unsigned vco_freq, ref_freq = rdev->clock.spll.reference_freq;

	/* start off with something large */
	unsigned optimal_score = ~0;

	/* loop through vco from low to high */
	vco_min = max(max(vco_min, vclk), dclk);
	for (vco_freq = vco_min; vco_freq <= vco_max; vco_freq += 100) {

		uint64_t fb_div = (uint64_t)vco_freq * fb_factor;
		unsigned vclk_div, dclk_div, score;

		do_div(fb_div, ref_freq);

		/* fb div out of range ? */
		if (fb_div > fb_mask)
			break; /* it can only get worse */

		fb_div &= fb_mask;

		/* calc vclk divider with current vco freq */
		vclk_div = radeon_uvd_calc_upll_post_div(vco_freq, vclk,
							 pd_min, pd_even);
		if (vclk_div > pd_max)
			break; /* vco is too big, it has to stop */

		/* calc dclk divider with current vco freq */
		dclk_div = radeon_uvd_calc_upll_post_div(vco_freq, dclk,
							 pd_min, pd_even);
		if (dclk_div > pd_max)
			break; /* vco is too big, it has to stop */

		/* calc score with current vco freq: the total amount both
		 * clocks fall short of their targets, 0 is a perfect match */
		score = vclk - (vco_freq / vclk_div) + dclk - (vco_freq / dclk_div);

		/* determine if this vco setting is better than current optimal settings */
		if (score < optimal_score) {
			*optimal_fb_div = fb_div;
			*optimal_vclk_div = vclk_div;
			*optimal_dclk_div = dclk_div;
			optimal_score = score;
			if (optimal_score == 0)
				break; /* it can't get better than this */
		}
	}

	/* did we find a valid setup? */
	if (optimal_score == ~0)
		return -EINVAL;

	return 0;
}
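
/*
 * radeon_uvd_send_upll_ctlreq - handshake a new UPLL setting with the hardware
 *
 * Toggles UPLL_CTLREQ in the given CG_UPLL_FUNC_CNTL register and polls
 * until the hardware acknowledges via both CTLACK bits, giving up after
 * roughly a second.
 */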
int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev,
				unsigned cg_upll_func_cntl)
{
	unsigned i;

	/* make sure UPLL_CTLREQ is deasserted */
	WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);

	mdelay(10);

	/* assert UPLL_CTLREQ */
	WREG32_P(cg_upll_func_cntl, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);

	/* wait for CTLACK and CTLACK2 to get asserted */
	for (i = 0; i < 100; ++i) {
		uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
		if ((RREG32(cg_upll_func_cntl) & mask) == mask)
			break;
		mdelay(10);
	}

	/* deassert UPLL_CTLREQ */
	WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);

	if (i == 100) {
		DRM_ERROR("Timeout setting UVD clocks!\n");
		return -ETIMEDOUT;
	}

	return 0;
}