/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *    Christian König <deathsimple@vodafone.de>
 */
#include <linux/firmware.h>
#include <linux/module.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "radeon.h"
#include "r600d.h"
/* 1 second timeout */
#define UVD_IDLE_TIMEOUT_MS	1000

/* Firmware Names */
#define FIRMWARE_RV710		"radeon/RV710_uvd.bin"
#define FIRMWARE_CYPRESS	"radeon/CYPRESS_uvd.bin"
#define FIRMWARE_SUMO		"radeon/SUMO_uvd.bin"
#define FIRMWARE_TAHITI		"radeon/TAHITI_uvd.bin"
#define FIRMWARE_BONAIRE	"radeon/BONAIRE_uvd.bin"
MODULE_FIRMWARE(FIRMWARE_RV710);
MODULE_FIRMWARE(FIRMWARE_CYPRESS);
MODULE_FIRMWARE(FIRMWARE_SUMO);
MODULE_FIRMWARE(FIRMWARE_TAHITI);
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
static void radeon_uvd_idle_work_handler(struct work_struct *work);
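/*
 * radeon_uvd_init - load the UVD firmware and create the VCPU BO
 *
 * Fetches the firmware image with request_firmware() and allocates a
 * single VRAM buffer that holds the firmware plus the stack and heap
 * the UVD VCPU uses at runtime.
 */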
int radeon_uvd_init(struct radeon_device *rdev)
{
        struct platform_device *pdev;
        unsigned long bo_size;
        const char *fw_name;
        int i, r;

        INIT_DELAYED_WORK(&rdev->uvd.idle_work, radeon_uvd_idle_work_handler);

        pdev = platform_device_register_simple("radeon_uvd", 0, NULL, 0);
        r = IS_ERR(pdev);
        if (r) {
                dev_err(rdev->dev, "radeon_uvd: Failed to register firmware\n");
                return -EINVAL;
        }

        switch (rdev->family) {
        case CHIP_RV710:
        case CHIP_RV730:
        case CHIP_RV740:
                fw_name = FIRMWARE_RV710;
                break;

        case CHIP_CYPRESS:
        case CHIP_HEMLOCK:
        case CHIP_JUNIPER:
        case CHIP_REDWOOD:
        case CHIP_CEDAR:
                fw_name = FIRMWARE_CYPRESS;
                break;

        case CHIP_PALM:
        case CHIP_SUMO:
        case CHIP_SUMO2:
        case CHIP_BARTS:
        case CHIP_TURKS:
        case CHIP_CAICOS:
        case CHIP_CAYMAN:
                fw_name = FIRMWARE_SUMO;
                break;

        case CHIP_TAHITI:
        case CHIP_VERDE:
        case CHIP_PITCAIRN:
        case CHIP_ARUBA:
                fw_name = FIRMWARE_TAHITI;
                break;

        case CHIP_BONAIRE:
        case CHIP_KABINI:
        case CHIP_KAVERI:
                fw_name = FIRMWARE_BONAIRE;
                break;

        default:
                return -EINVAL;
        }

        r = request_firmware(&rdev->uvd_fw, fw_name, &pdev->dev);
        if (r) {
                dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n",
                        fw_name);
                platform_device_unregister(pdev);
                return r;
        }

        platform_device_unregister(pdev);

        bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) +
                  RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE;
        r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
                             RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->uvd.vcpu_bo);
        if (r) {
                dev_err(rdev->dev, "(%d) failed to allocate UVD bo\n", r);
                return r;
        }

        r = radeon_uvd_resume(rdev);
        if (r)
                return r;

        memset(rdev->uvd.cpu_addr, 0, bo_size);
        memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);

        r = radeon_uvd_suspend(rdev);
        if (r)
                return r;

        for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
                atomic_set(&rdev->uvd.handles[i], 0);
                rdev->uvd.filp[i] = NULL;
        }

        return 0;
}
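/*
 * radeon_uvd_fini - UVD teardown, counterpart to radeon_uvd_init
 *
 * Unpins and releases the VCPU buffer object.
 */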
void radeon_uvd_fini(struct radeon_device *rdev)
{
        radeon_uvd_suspend(rdev);
        radeon_bo_unref(&rdev->uvd.vcpu_bo);
}
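/*
 * radeon_uvd_suspend - move the VCPU BO out of VRAM
 *
 * Re-pins the buffer in the CPU domain and, when possible, keeps a
 * CPU mapping around; the fence driver address is cleared if no
 * mapping could be established.
 */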
int radeon_uvd_suspend(struct radeon_device *rdev)
{
        int r;

        if (rdev->uvd.vcpu_bo == NULL)
                return 0;

        r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
        if (!r) {
                radeon_bo_kunmap(rdev->uvd.vcpu_bo);
                radeon_bo_unpin(rdev->uvd.vcpu_bo);
                rdev->uvd.cpu_addr = NULL;
                if (!radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_CPU, NULL)) {
                        radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
                }
                radeon_bo_unreserve(rdev->uvd.vcpu_bo);

                if (rdev->uvd.cpu_addr) {
                        radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
                } else {
                        rdev->fence_drv[R600_RING_TYPE_UVD_INDEX].cpu_addr = NULL;
                }
        }
        return r;
}
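/*
 * radeon_uvd_resume - pin the VCPU BO back into VRAM
 *
 * Re-pins the buffer at a GPU address UVD can use and re-creates the
 * CPU mapping of the firmware area.
 */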
int radeon_uvd_resume(struct radeon_device *rdev)
{
        int r;

        if (rdev->uvd.vcpu_bo == NULL)
                return -EINVAL;

        r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
        if (r) {
                radeon_bo_unref(&rdev->uvd.vcpu_bo);
                dev_err(rdev->dev, "(%d) failed to reserve UVD bo\n", r);
                return r;
        }

        /* the BO has been pinned in the CPU domain, unmap and unpin it */
        radeon_bo_kunmap(rdev->uvd.vcpu_bo);
        radeon_bo_unpin(rdev->uvd.vcpu_bo);

        r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
                          &rdev->uvd.gpu_addr);
        if (r) {
                radeon_bo_unreserve(rdev->uvd.vcpu_bo);
                radeon_bo_unref(&rdev->uvd.vcpu_bo);
                dev_err(rdev->dev, "(%d) UVD bo pin failed\n", r);
                return r;
        }

        r = radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
        if (r) {
                dev_err(rdev->dev, "(%d) UVD map failed\n", r);
                return r;
        }

        radeon_bo_unreserve(rdev->uvd.vcpu_bo);

        return 0;
}
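/*
 * UVD can only access buffers inside a single 256MB segment, so force
 * the placement of this BO into the first 256MB of the address space.
 */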
void radeon_uvd_force_into_uvd_segment(struct radeon_bo *rbo)
{
        rbo->placement.fpfn = 0 >> PAGE_SHIFT;
        rbo->placement.lpfn = (256 * 1024 * 1024) >> PAGE_SHIFT;
}
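/*
 * radeon_uvd_free_handles - release all UVD sessions owned by a file
 *
 * Emits a destroy message for every handle still registered to @filp
 * and waits for the resulting fences.
 */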
void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp)
{
        int i, r;

        for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
                if (rdev->uvd.filp[i] == filp) {
                        uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
                        struct radeon_fence *fence;

                        r = radeon_uvd_get_destroy_msg(rdev,
                                R600_RING_TYPE_UVD_INDEX, handle, &fence);
                        if (r) {
                                DRM_ERROR("Error destroying UVD (%d)!\n", r);
                                continue;
                        }

                        radeon_fence_wait(fence, false);
                        radeon_fence_unref(&fence);

                        rdev->uvd.filp[i] = NULL;
                        atomic_set(&rdev->uvd.handles[i], 0);
                }
        }
}
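/*
 * radeon_uvd_cs_msg_decode - sanity check a UVD decode message
 *
 * Derives the minimum decoded picture buffer (DPB) size for the codec
 * and stream dimensions and validates the sizes supplied by userspace.
 */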
static int radeon_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
{
        unsigned stream_type = msg[4];
        unsigned width = msg[6];
        unsigned height = msg[7];
        unsigned dpb_size = msg[9];
        unsigned pitch = msg[28];

        unsigned width_in_mb = width / 16;
        unsigned height_in_mb = ALIGN(height / 16, 2);

        unsigned image_size, tmp, min_dpb_size;

        image_size = width * height;
        image_size += image_size / 2;
        image_size = ALIGN(image_size, 1024);

        switch (stream_type) {
        case 0: /* H264 */

                /* reference picture buffer */
                min_dpb_size = image_size * 17;

                /* macroblock context buffer */
                min_dpb_size += width_in_mb * height_in_mb * 17 * 192;

                /* IT surface buffer */
                min_dpb_size += width_in_mb * height_in_mb * 32;
                break;

        case 1: /* VC1 */

                /* reference picture buffer */
                min_dpb_size = image_size * 3;

                /* CONTEXT_BUFFER */
                min_dpb_size += width_in_mb * height_in_mb * 128;

                /* IT surface buffer */
                min_dpb_size += width_in_mb * 64;

                /* DB surface buffer */
                min_dpb_size += width_in_mb * 128;

                /* BP */
                tmp = max(width_in_mb, height_in_mb);
                min_dpb_size += ALIGN(tmp * 7 * 16, 64);
                break;

        case 3: /* MPEG2 */

                /* reference picture buffer */
                min_dpb_size = image_size * 3;
                break;

        case 4: /* MPEG4 */

                /* reference picture buffer */
                min_dpb_size = image_size * 3;

                /* CM prediction buffer */
                min_dpb_size += width_in_mb * height_in_mb * 64;

                /* IT surface buffer */
                min_dpb_size += ALIGN(width_in_mb * height_in_mb * 32, 64);
                break;

        default:
                DRM_ERROR("UVD codec not handled %d!\n", stream_type);
                return -EINVAL;
        }

        if (width > pitch) {
                DRM_ERROR("Invalid UVD decoding target pitch!\n");
                return -EINVAL;
        }

        if (dpb_size < min_dpb_size) {
                DRM_ERROR("Invalid dpb_size in UVD message (%d / %d)!\n",
                          dpb_size, min_dpb_size);
                return -EINVAL;
        }

        buf_sizes[0x1] = dpb_size;
        buf_sizes[0x2] = image_size;
        return 0;
}
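/*
 * radeon_uvd_cs_msg - handle a UVD message found in the command stream
 *
 * Create messages register a session handle, decode messages are size
 * checked, and destroy messages free the handle again.
 */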
static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
                             unsigned offset, unsigned buf_sizes[])
{
        int32_t *msg, msg_type, handle;
        void *ptr;

        int i, r;

        if (offset & 0x3F) {
                DRM_ERROR("UVD messages must be 64 byte aligned!\n");
                return -EINVAL;
        }

        r = radeon_bo_kmap(bo, &ptr);
        if (r)
                return r;

        msg = ptr + offset;

        msg_type = msg[1];
        handle = msg[2];

        if (handle == 0) {
                DRM_ERROR("Invalid UVD handle!\n");
                return -EINVAL;
        }

        if (msg_type == 1) {
                /* it's a decode msg, calc buffer sizes */
                r = radeon_uvd_cs_msg_decode(msg, buf_sizes);
                radeon_bo_kunmap(bo);
                if (r)
                        return r;

        } else if (msg_type == 2) {
                /* it's a destroy msg, free the handle */
                for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
                        atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0);
                radeon_bo_kunmap(bo);
                return 0;
        } else {
                /* it's a create msg, no special handling needed */
                radeon_bo_kunmap(bo);
        }

        /* create or decode, validate the handle */
        for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
                if (atomic_read(&p->rdev->uvd.handles[i]) == handle)
                        return 0;
        }

        /* handle not found, try to alloc a new one */
        for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
                if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
                        p->rdev->uvd.filp[i] = p->filp;
                        return 0;
                }
        }

        DRM_ERROR("No more free UVD handles!\n");
        return -EINVAL;
}
static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
                               int data0, int data1,
                               unsigned buf_sizes[])
{
        struct radeon_cs_chunk *relocs_chunk;
        struct radeon_cs_reloc *reloc;
        unsigned idx, cmd, offset;
        uint64_t start, end;
        int r;

        relocs_chunk = &p->chunks[p->chunk_relocs_idx];
        offset = radeon_get_ib_value(p, data0);
        idx = radeon_get_ib_value(p, data1);
        if (idx >= relocs_chunk->length_dw) {
                DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
                          idx, relocs_chunk->length_dw);
                return -EINVAL;
        }

        reloc = p->relocs_ptr[(idx / 4)];
        start = reloc->lobj.gpu_offset;
        end = start + radeon_bo_size(reloc->robj);
        start += offset;

        p->ib.ptr[data0] = start & 0xFFFFFFFF;
        p->ib.ptr[data1] = start >> 32;

        cmd = radeon_get_ib_value(p, p->idx) >> 1;

        if (cmd < 0x4) {
                if ((end - start) < buf_sizes[cmd]) {
                        DRM_ERROR("buffer too small (%d / %d)!\n",
                                  (unsigned)(end - start), buf_sizes[cmd]);
                        return -EINVAL;
                }

        } else if (cmd != 0x100) {
                DRM_ERROR("invalid UVD command %X!\n", cmd);
                return -EINVAL;
        }

        if ((start >> 28) != (end >> 28)) {
                DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
                          start, end);
                return -EINVAL;
        }

        /* TODO: is this still necessary on NI+ ? */
        if ((cmd == 0 || cmd == 0x3) &&
            (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
                DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
                          start, end);
                return -EINVAL;
        }

        if (cmd == 0) {
                r = radeon_uvd_cs_msg(p, reloc->robj, offset, buf_sizes);
                if (r)
                        return r;
        }

        return 0;
}
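/*
 * radeon_uvd_cs_reg - parse the register writes of a type-0 packet
 *
 * Records where the DATA0/DATA1 values live and kicks off relocation
 * handling when the VCPU command register is written.
 */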
static int radeon_uvd_cs_reg(struct radeon_cs_parser *p,
                             struct radeon_cs_packet *pkt,
                             int *data0, int *data1,
                             unsigned buf_sizes[])
{
        int i, r;

        p->idx++;
        for (i = 0; i <= pkt->count; ++i) {
                switch (pkt->reg + i*4) {
                case UVD_GPCOM_VCPU_DATA0:
                        *data0 = p->idx;
                        break;
                case UVD_GPCOM_VCPU_DATA1:
                        *data1 = p->idx;
                        break;
                case UVD_GPCOM_VCPU_CMD:
                        r = radeon_uvd_cs_reloc(p, *data0, *data1, buf_sizes);
                        if (r)
                                return r;
                        break;
                case UVD_ENGINE_CNTL:
                        break;
                default:
                        DRM_ERROR("Invalid reg 0x%X!\n",
                                  pkt->reg + i*4);
                        return -EINVAL;
                }
                p->idx++;
        }

        return 0;
}
int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
{
        struct radeon_cs_packet pkt;
        int r, data0 = 0, data1 = 0;

        /* minimum buffer sizes */
        unsigned buf_sizes[] = {
                [0x00000000]    =       2048,
                [0x00000001]    =       32 * 1024 * 1024,
                [0x00000002]    =       2048 * 1152 * 3,
                [0x00000003]    =       2048,
        };

        if (p->chunks[p->chunk_ib_idx].length_dw % 16) {
                DRM_ERROR("UVD IB length (%d) not 16 dwords aligned!\n",
                          p->chunks[p->chunk_ib_idx].length_dw);
                return -EINVAL;
        }

        if (p->chunk_relocs_idx == -1) {
                DRM_ERROR("No relocation chunk !\n");
                return -EINVAL;
        }

        do {
                r = radeon_cs_packet_parse(p, &pkt, p->idx);
                if (r)
                        return r;
                switch (pkt.type) {
                case RADEON_PACKET_TYPE0:
                        r = radeon_uvd_cs_reg(p, &pkt, &data0,
                                              &data1, buf_sizes);
                        if (r)
                                return r;
                        break;
                case RADEON_PACKET_TYPE2:
                        p->idx += pkt.count + 2;
                        break;
                default:
                        DRM_ERROR("Unknown packet type %d !\n", pkt.type);
                        return -EINVAL;
                }
        } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);

        return 0;
}
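/*
 * radeon_uvd_send_msg - submit a message buffer to the UVD ring
 *
 * Builds a 16-dword IB that points the VCPU at the message BO and
 * optionally returns the fence of the submission.
 */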
static int radeon_uvd_send_msg(struct radeon_device *rdev,
                               int ring, struct radeon_bo *bo,
                               struct radeon_fence **fence)
{
        struct ttm_validate_buffer tv;
        struct ww_acquire_ctx ticket;
        struct list_head head;
        struct radeon_ib ib;
        uint64_t addr;
        int i, r;

        memset(&tv, 0, sizeof(tv));
        tv.bo = &bo->tbo;

        INIT_LIST_HEAD(&head);
        list_add(&tv.head, &head);

        r = ttm_eu_reserve_buffers(&ticket, &head);
        if (r)
                return r;

        radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_VRAM);
        radeon_uvd_force_into_uvd_segment(bo);

        r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
        if (r)
                goto err;

        r = radeon_ib_get(rdev, ring, &ib, NULL, 16);
        if (r)
                goto err;

        addr = radeon_bo_gpu_offset(bo);
        ib.ptr[0] = PACKET0(UVD_GPCOM_VCPU_DATA0, 0);
        ib.ptr[1] = addr;
        ib.ptr[2] = PACKET0(UVD_GPCOM_VCPU_DATA1, 0);
        ib.ptr[3] = addr >> 32;
        ib.ptr[4] = PACKET0(UVD_GPCOM_VCPU_CMD, 0);
        ib.ptr[5] = 0;
        for (i = 6; i < 16; ++i)
                ib.ptr[i] = PACKET2(0);
        ib.length_dw = 16;

        r = radeon_ib_schedule(rdev, &ib, NULL);
        if (r)
                goto err;
        ttm_eu_fence_buffer_objects(&ticket, &head, ib.fence);

        if (fence)
                *fence = radeon_fence_ref(ib.fence);

        radeon_ib_free(rdev, &ib);
        radeon_bo_unref(&bo);
        return 0;

err:
        ttm_eu_backoff_reservation(&ticket, &head);
        return r;
}
/* multiple fence commands without any stream commands in between can
   crash the vcpu so just try to emit a dummy create/destroy msg to
   avoid this */
int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
                              uint32_t handle, struct radeon_fence **fence)
{
        struct radeon_bo *bo;
        uint32_t *msg;
        int r, i;

        r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true,
                             RADEON_GEM_DOMAIN_VRAM, NULL, &bo);
        if (r)
                return r;

        r = radeon_bo_reserve(bo, false);
        if (r) {
                radeon_bo_unref(&bo);
                return r;
        }

        r = radeon_bo_kmap(bo, (void **)&msg);
        if (r) {
                radeon_bo_unreserve(bo);
                radeon_bo_unref(&bo);
                return r;
        }

        /* stitch together an UVD create msg */
        msg[0] = cpu_to_le32(0x00000de4);
        msg[1] = cpu_to_le32(0x00000000);
        msg[2] = cpu_to_le32(handle);
        msg[3] = cpu_to_le32(0x00000000);
        msg[4] = cpu_to_le32(0x00000000);
        msg[5] = cpu_to_le32(0x00000000);
        msg[6] = cpu_to_le32(0x00000000);
        msg[7] = cpu_to_le32(0x00000780);
        msg[8] = cpu_to_le32(0x00000440);
        msg[9] = cpu_to_le32(0x00000000);
        msg[10] = cpu_to_le32(0x01b37000);
        for (i = 11; i < 1024; ++i)
                msg[i] = cpu_to_le32(0x0);

        radeon_bo_kunmap(bo);
        radeon_bo_unreserve(bo);

        return radeon_uvd_send_msg(rdev, ring, bo, fence);
}
int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
                               uint32_t handle, struct radeon_fence **fence)
{
        struct radeon_bo *bo;
        uint32_t *msg;
        int r, i;

        r = radeon_bo_create(rdev, 1024, PAGE_SIZE, true,
                             RADEON_GEM_DOMAIN_VRAM, NULL, &bo);
        if (r)
                return r;

        r = radeon_bo_reserve(bo, false);
        if (r) {
                radeon_bo_unref(&bo);
                return r;
        }

        r = radeon_bo_kmap(bo, (void **)&msg);
        if (r) {
                radeon_bo_unreserve(bo);
                radeon_bo_unref(&bo);
                return r;
        }

        /* stitch together an UVD destroy msg */
        msg[0] = cpu_to_le32(0x00000de4);
        msg[1] = cpu_to_le32(0x00000002);
        msg[2] = cpu_to_le32(handle);
        msg[3] = cpu_to_le32(0x00000000);
        for (i = 4; i < 1024; ++i)
                msg[i] = cpu_to_le32(0x0);

        radeon_bo_kunmap(bo);
        radeon_bo_unreserve(bo);

        return radeon_uvd_send_msg(rdev, ring, bo, fence);
}
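/*
 * radeon_uvd_idle_work_handler - power down UVD when it goes idle
 *
 * Once no fences are left outstanding on the UVD ring, the UVD clocks
 * are dropped, either through DPM or by setting them to zero directly;
 * otherwise the work item re-arms itself.
 */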
static void radeon_uvd_idle_work_handler(struct work_struct *work)
{
        struct radeon_device *rdev =
                container_of(work, struct radeon_device, uvd.idle_work.work);

        if (radeon_fence_count_emitted(rdev, R600_RING_TYPE_UVD_INDEX) == 0) {
                if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
                        mutex_lock(&rdev->pm.mutex);
                        rdev->pm.dpm.uvd_active = false;
                        mutex_unlock(&rdev->pm.mutex);
                        radeon_pm_compute_clocks(rdev);
                } else {
                        radeon_set_uvd_clocks(rdev, 0, 0);
                }
        } else {
                schedule_delayed_work(&rdev->uvd.idle_work,
                                      msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
        }
}
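/*
 * radeon_uvd_note_usage - note that UVD is in use
 *
 * Restarts the idle timer and, on the first use after an idle period,
 * raises the UVD clocks again.
 */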
void radeon_uvd_note_usage(struct radeon_device *rdev)
{
        bool set_clocks = !cancel_delayed_work_sync(&rdev->uvd.idle_work);
        set_clocks &= schedule_delayed_work(&rdev->uvd.idle_work,
                                            msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
        if (set_clocks) {
                if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
                        /* XXX pick SD/HD/MVC */
                        radeon_dpm_enable_power_state(rdev, POWER_STATE_TYPE_INTERNAL_UVD);
                } else {
                        radeon_set_uvd_clocks(rdev, 53300, 40000);
                }
        }
}
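/*
 * radeon_uvd_calc_upll_post_div - pick a post divider for one clock
 *
 * Chooses the smallest post divider that keeps vco_freq / post_div at
 * or below target_freq while honouring the hardware minimum and the
 * even-divider rule. For example, vco_freq = 80000 and target_freq =
 * 40000 with pd_min = 1 gives post_div = 2.
 */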
static unsigned radeon_uvd_calc_upll_post_div(unsigned vco_freq,
                                              unsigned target_freq,
                                              unsigned pd_min,
                                              unsigned pd_even)
{
        unsigned post_div = vco_freq / target_freq;

        /* adjust to post divider minimum value */
        if (post_div < pd_min)
                post_div = pd_min;

        /* we always need a frequency less than or equal the target */
        if ((vco_freq / post_div) > target_freq)
                post_div += 1;

        /* post dividers above a certain value must be even */
        if (post_div > pd_even && post_div % 2)
                post_div += 1;

        return post_div;
}
/**
 * radeon_uvd_calc_upll_dividers - calc UPLL clock dividers
 *
 * @rdev: radeon_device pointer
 * @vclk: wanted VCLK
 * @dclk: wanted DCLK
 * @vco_min: minimum VCO frequency
 * @vco_max: maximum VCO frequency
 * @fb_factor: factor to multiply vco freq with
 * @fb_mask: limit and bitmask for feedback divider
 * @pd_min: post divider minimum
 * @pd_max: post divider maximum
 * @pd_even: post divider must be even above this value
 * @optimal_fb_div: resulting feedback divider
 * @optimal_vclk_div: resulting vclk post divider
 * @optimal_dclk_div: resulting dclk post divider
 *
 * Calculate dividers for UVD's UPLL (R6xx-SI, except APUs).
 * Returns zero on success, -EINVAL on error.
 */
int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
                                  unsigned vclk, unsigned dclk,
                                  unsigned vco_min, unsigned vco_max,
                                  unsigned fb_factor, unsigned fb_mask,
                                  unsigned pd_min, unsigned pd_max,
                                  unsigned pd_even,
                                  unsigned *optimal_fb_div,
                                  unsigned *optimal_vclk_div,
                                  unsigned *optimal_dclk_div)
{
        unsigned vco_freq, ref_freq = rdev->clock.spll.reference_freq;

        /* start off with something large */
        unsigned optimal_score = ~0;

        /* loop through vco from low to high */
        vco_min = max(max(vco_min, vclk), dclk);
        for (vco_freq = vco_min; vco_freq <= vco_max; vco_freq += 100) {

                uint64_t fb_div = (uint64_t)vco_freq * fb_factor;
                unsigned vclk_div, dclk_div, score;

                do_div(fb_div, ref_freq);

                /* fb div out of range ? */
                if (fb_div > fb_mask)
                        break; /* it can only get worse */

                fb_div &= fb_mask;

                /* calc vclk divider with current vco freq */
                vclk_div = radeon_uvd_calc_upll_post_div(vco_freq, vclk,
                                                         pd_min, pd_even);
                if (vclk_div > pd_max)
                        break; /* vco is too big, it has to stop */

                /* calc dclk divider with current vco freq */
                dclk_div = radeon_uvd_calc_upll_post_div(vco_freq, dclk,
                                                         pd_min, pd_even);
                if (dclk_div > pd_max)
                        break; /* vco is too big, it has to stop */

                /* calc score with current vco freq */
                score = vclk - (vco_freq / vclk_div) + dclk - (vco_freq / dclk_div);

                /* determine if this vco setting is better than current optimal settings */
                if (score < optimal_score) {
                        *optimal_fb_div = fb_div;
                        *optimal_vclk_div = vclk_div;
                        *optimal_dclk_div = dclk_div;
                        optimal_score = score;
                        if (optimal_score == 0)
                                break; /* it can't get better than this */
                }
        }

        /* did we find a valid setup? */
        if (optimal_score == ~0)
                return -EINVAL;

        return 0;
}
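/*
 * radeon_uvd_send_upll_ctlreq - issue the UPLL change request
 *
 * Asserts UPLL_CTLREQ and polls for both CTLACK bits to be set before
 * deasserting the request again.
 */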
int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev,
                                unsigned cg_upll_func_cntl)
{
        unsigned i;

        /* make sure UPLL_CTLREQ is deasserted */
        WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);

        mdelay(10);

        /* assert UPLL_CTLREQ */
        WREG32_P(cg_upll_func_cntl, UPLL_CTLREQ_MASK, ~UPLL_CTLREQ_MASK);

        /* wait for CTLACK and CTLACK2 to get asserted */
        for (i = 0; i < 100; ++i) {
                uint32_t mask = UPLL_CTLACK_MASK | UPLL_CTLACK2_MASK;
                if ((RREG32(cg_upll_func_cntl) & mask) == mask)
                        break;
                mdelay(10);
        }

        /* deassert UPLL_CTLREQ */
        WREG32_P(cg_upll_func_cntl, 0, ~UPLL_CTLREQ_MASK);

        if (i == 100) {
                DRM_ERROR("Timeout setting UVD clocks!\n");
                return -ETIMEDOUT;
        }

        return 0;
}