2 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
3 * Author:Mark Yao <mark.yao@rock-chips.com>
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
17 #include <drm/drm_atomic.h>
18 #include <drm/drm_crtc.h>
19 #include <drm/drm_crtc_helper.h>
20 #include <drm/drm_plane_helper.h>
22 #include <linux/kernel.h>
23 #include <linux/module.h>
24 #include <linux/platform_device.h>
25 #include <linux/clk.h>
27 #include <linux/of_device.h>
28 #include <linux/pm_runtime.h>
29 #include <linux/component.h>
31 #include <linux/reset.h>
32 #include <linux/delay.h>
34 #include "rockchip_drm_drv.h"
35 #include "rockchip_drm_gem.h"
36 #include "rockchip_drm_fb.h"
37 #include "rockchip_drm_vop.h"
/*
 * Register access macros: every VOP register field is described by a
 * struct vop_reg (offset/mask/shift); these macros shift the value into
 * place and go through the masked-write helpers, which keep a shadow
 * copy (regsbak) in sync.  RELAXED variants use writel_relaxed (no
 * barrier), NORMAL uses writel.
 */
39 #define __REG_SET_RELAXED(x, off, mask, shift, v) \
40 vop_mask_write_relaxed(x, off, (mask) << shift, (v) << shift)
41 #define __REG_SET_NORMAL(x, off, mask, shift, v) \
42 vop_mask_write(x, off, (mask) << shift, (v) << shift)
44 #define REG_SET(x, base, reg, v, mode) \
45 __REG_SET_##mode(x, base + reg.offset, reg.mask, reg.shift, v)
/*
 * NOTE(review): REG_SET_MASK expands identically to REG_SET; presumably
 * it should take an explicit mask argument — confirm against the vendor
 * tree before relying on it.
 */
46 #define REG_SET_MASK(x, base, reg, v, mode) \
47 __REG_SET_##mode(x, base + reg.offset, reg.mask, reg.shift, v)
49 #define VOP_WIN_SET(x, win, name, v) \
50 REG_SET(x, win->base, win->phy->name, v, RELAXED)
51 #define VOP_SCL_SET(x, win, name, v) \
52 REG_SET(x, win->base, win->phy->scl->name, v, RELAXED)
53 #define VOP_SCL_SET_EXT(x, win, name, v) \
54 REG_SET(x, win->base, win->phy->scl->ext->name, v, RELAXED)
55 #define VOP_CTRL_SET(x, name, v) \
56 REG_SET(x, 0, (x)->data->ctrl->name, v, NORMAL)
/*
 * NOTE(review): VOP_INTR_GET reads through data->ctrl while every other
 * interrupt macro goes through data->intr — looks like it should be
 * data->intr; verify (the macro may simply be unused).
 */
58 #define VOP_INTR_GET(vop, name) \
59 vop_read_reg(vop, 0, &vop->data->ctrl->name)
61 #define VOP_INTR_SET(vop, name, v) \
62 REG_SET(vop, 0, vop->data->intr->name, v, NORMAL)
/*
 * VOP_INTR_SET_TYPE: build a per-bit register value for every interrupt
 * matching `type`, then write it.  NOTE(review): several lines of this
 * macro (the do{}while wrapper and the reg accumulation) are missing
 * from this excerpt.
 */
63 #define VOP_INTR_SET_TYPE(vop, name, type, v) \
66 for (i = 0; i < vop->data->intr->nintrs; i++) { \
67 if (vop->data->intr->intrs[i] & type) \
70 VOP_INTR_SET(vop, name, reg); \
72 #define VOP_INTR_GET_TYPE(vop, name, type) \
73 vop_get_intr_type(vop, &vop->data->intr->name, type)
75 #define VOP_WIN_GET(x, win, name) \
76 vop_read_reg(x, win->base, &win->phy->name)
/* Raw read of a window's active YRGB scanout address (no shift/mask). */
78 #define VOP_WIN_GET_YRGBADDR(vop, win) \
79 vop_readl(vop, win->base + win->phy->yrgb_mst.offset)
/* Upcasts from the embedded DRM objects back to the driver wrappers. */
81 #define to_vop(x) container_of(x, struct vop, crtc)
82 #define to_vop_win(x) container_of(x, struct vop_win, base)
83 #define to_vop_plane_state(x) container_of(x, struct vop_plane_state, base)
/*
 * Driver state containers.  NOTE(review): this excerpt is missing many
 * field lines (gaps in the original numbering) — three structs are
 * declared here: vop_plane_state (driver-subclassed plane state),
 * vop_win (plane wrapper bound to one hardware window), and vop (the
 * per-CRTC device).  container_of() above requires `base` to be the
 * first member of the first two.
 */
85 struct vop_plane_state
{
/* Must stay first: to_vop_plane_state() container_of's on it. */
86 struct drm_plane_state base
;
/* struct vop_win — per-window plane wrapper (opening line missing). */
95 struct drm_plane base
;
/* Static description of the hardware window this plane drives. */
96 const struct vop_win_data
*data
;
99 struct vop_plane_state state
;
/* struct vop — the CRTC device itself (opening line missing). */
103 struct drm_crtc crtc
;
105 struct drm_device
*drm_dev
;
108 /* mutex vsync_ work */
109 struct mutex vsync_mutex
;
110 bool vsync_work_pending
;
111 struct completion dsp_hold_completion
;
112 struct completion wait_update_complete
;
/* Pending pageflip event, delivered from vop_handle_vblank(). */
113 struct drm_pending_vblank_event
*event
;
115 const struct vop_data
*data
;
120 /* physical map length of vop register */
123 /* one time only one process allowed to config the register */
125 /* lock vop irq reg */
134 /* vop share memory frequency */
138 struct reset_control
*dclk_rst
;
/* Flexible array member: sized at alloc time in vop_bind(). */
140 struct vop_win win
[];
143 static inline void vop_writel(struct vop
*vop
, uint32_t offset
, uint32_t v
)
145 writel(v
, vop
->regs
+ offset
);
146 vop
->regsbak
[offset
>> 2] = v
;
149 static inline uint32_t vop_readl(struct vop
*vop
, uint32_t offset
)
151 return readl(vop
->regs
+ offset
);
154 static inline uint32_t vop_read_reg(struct vop
*vop
, uint32_t base
,
155 const struct vop_reg
*reg
)
157 return (vop_readl(vop
, base
+ reg
->offset
) >> reg
->shift
) & reg
->mask
;
160 static inline void vop_mask_write(struct vop
*vop
, uint32_t offset
,
161 uint32_t mask
, uint32_t v
)
164 uint32_t cached_val
= vop
->regsbak
[offset
>> 2];
166 cached_val
= (cached_val
& ~mask
) | v
;
167 writel(cached_val
, vop
->regs
+ offset
);
168 vop
->regsbak
[offset
>> 2] = cached_val
;
172 static inline void vop_mask_write_relaxed(struct vop
*vop
, uint32_t offset
,
173 uint32_t mask
, uint32_t v
)
176 uint32_t cached_val
= vop
->regsbak
[offset
>> 2];
178 cached_val
= (cached_val
& ~mask
) | v
;
179 writel_relaxed(cached_val
, vop
->regs
+ offset
);
180 vop
->regsbak
[offset
>> 2] = cached_val
;
184 static inline uint32_t vop_get_intr_type(struct vop
*vop
,
185 const struct vop_reg
*reg
, int type
)
188 uint32_t regs
= vop_read_reg(vop
, 0, reg
);
190 for (i
= 0; i
< vop
->data
->intr
->nintrs
; i
++) {
191 if ((type
& vop
->data
->intr
->intrs
[i
]) && (regs
& 1 << i
))
192 ret
|= vop
->data
->intr
->intrs
[i
];
198 static inline void vop_cfg_done(struct vop
*vop
)
200 VOP_CTRL_SET(vop
, cfg_done
, 1);
203 static bool has_rb_swapped(uint32_t format
)
206 case DRM_FORMAT_XBGR8888
:
207 case DRM_FORMAT_ABGR8888
:
208 case DRM_FORMAT_BGR888
:
209 case DRM_FORMAT_BGR565
:
216 static enum vop_data_format
vop_convert_format(uint32_t format
)
219 case DRM_FORMAT_XRGB8888
:
220 case DRM_FORMAT_ARGB8888
:
221 case DRM_FORMAT_XBGR8888
:
222 case DRM_FORMAT_ABGR8888
:
223 return VOP_FMT_ARGB8888
;
224 case DRM_FORMAT_RGB888
:
225 case DRM_FORMAT_BGR888
:
226 return VOP_FMT_RGB888
;
227 case DRM_FORMAT_RGB565
:
228 case DRM_FORMAT_BGR565
:
229 return VOP_FMT_RGB565
;
230 case DRM_FORMAT_NV12
:
231 return VOP_FMT_YUV420SP
;
232 case DRM_FORMAT_NV16
:
233 return VOP_FMT_YUV422SP
;
234 case DRM_FORMAT_NV24
:
235 return VOP_FMT_YUV444SP
;
237 DRM_ERROR("unsupport format[%08x]\n", format
);
242 static bool is_yuv_support(uint32_t format
)
245 case DRM_FORMAT_NV12
:
246 case DRM_FORMAT_NV16
:
247 case DRM_FORMAT_NV24
:
254 static bool is_alpha_support(uint32_t format
)
257 case DRM_FORMAT_ARGB8888
:
258 case DRM_FORMAT_ABGR8888
:
265 static uint16_t scl_vop_cal_scale(enum scale_mode mode
, uint32_t src
,
266 uint32_t dst
, bool is_horizontal
,
267 int vsu_mode
, int *vskiplines
)
269 uint16_t val
= 1 << SCL_FT_DEFAULT_FIXPOINT_SHIFT
;
272 if (mode
== SCALE_UP
)
273 val
= GET_SCL_FT_BIC(src
, dst
);
274 else if (mode
== SCALE_DOWN
)
275 val
= GET_SCL_FT_BILI_DN(src
, dst
);
277 if (mode
== SCALE_UP
) {
278 if (vsu_mode
== SCALE_UP_BIL
)
279 val
= GET_SCL_FT_BILI_UP(src
, dst
);
281 val
= GET_SCL_FT_BIC(src
, dst
);
282 } else if (mode
== SCALE_DOWN
) {
284 *vskiplines
= scl_get_vskiplines(src
, dst
);
285 val
= scl_get_bili_dn_vskip(src
, dst
,
288 val
= GET_SCL_FT_BILI_DN(src
, dst
);
/*
 * Program every scaler register for one window: pick line-buffer mode,
 * per-direction scale modes, compute the Y/RGB and (for YUV) CbCr
 * factors via scl_vop_cal_scale(), and write them through
 * VOP_SCL_SET/VOP_SCL_SET_EXT.  Windows without the extended scaler
 * fall back to the simple scale2 factors.
 *
 * NOTE(review): several lines are missing from this excerpt (local
 * declarations of val/lb_mode/vsu_mode/vskiplines, early-return checks,
 * is_yuv guards and closing braces) — compare against the full source
 * before editing logic here.
 */
296 static void scl_vop_cal_scl_fac(struct vop
*vop
, const struct vop_win_data
*win
,
297 uint32_t src_w
, uint32_t src_h
, uint32_t dst_w
,
298 uint32_t dst_h
, uint32_t pixel_format
)
300 uint16_t yrgb_hor_scl_mode
, yrgb_ver_scl_mode
;
301 uint16_t cbcr_hor_scl_mode
= SCALE_NONE
;
302 uint16_t cbcr_ver_scl_mode
= SCALE_NONE
;
303 int hsub
= drm_format_horz_chroma_subsampling(pixel_format
);
304 int vsub
= drm_format_vert_chroma_subsampling(pixel_format
);
305 bool is_yuv
= is_yuv_support(pixel_format
);
306 uint16_t cbcr_src_w
= src_w
/ hsub
;
307 uint16_t cbcr_src_h
= src_h
/ vsub
;
314 DRM_ERROR("Maximum destination width (3840) exceeded\n");
/* No extended scaler on this window: simple factors only. */
318 if (!win
->phy
->scl
->ext
) {
319 VOP_SCL_SET(vop
, win
, scale_yrgb_x
,
320 scl_cal_scale2(src_w
, dst_w
));
321 VOP_SCL_SET(vop
, win
, scale_yrgb_y
,
322 scl_cal_scale2(src_h
, dst_h
));
324 VOP_SCL_SET(vop
, win
, scale_cbcr_x
,
325 scl_cal_scale2(src_w
, dst_w
));
326 VOP_SCL_SET(vop
, win
, scale_cbcr_y
,
327 scl_cal_scale2(src_h
, dst_h
));
332 yrgb_hor_scl_mode
= scl_get_scl_mode(src_w
, dst_w
);
333 yrgb_ver_scl_mode
= scl_get_scl_mode(src_h
, dst_h
);
336 cbcr_hor_scl_mode
= scl_get_scl_mode(cbcr_src_w
, dst_w
);
337 cbcr_ver_scl_mode
= scl_get_scl_mode(cbcr_src_h
, dst_h
);
/* Line-buffer width is chosen from the wider post/pre-scale plane. */
338 if (cbcr_hor_scl_mode
== SCALE_DOWN
)
339 lb_mode
= scl_vop_cal_lb_mode(dst_w
, true);
341 lb_mode
= scl_vop_cal_lb_mode(cbcr_src_w
, true);
343 if (yrgb_hor_scl_mode
== SCALE_DOWN
)
344 lb_mode
= scl_vop_cal_lb_mode(dst_w
, false);
346 lb_mode
= scl_vop_cal_lb_mode(src_w
, false);
349 VOP_SCL_SET_EXT(vop
, win
, lb_mode
, lb_mode
);
/* Wide line-buffer modes restrict vertical scaling capability. */
350 if (lb_mode
== LB_RGB_3840X2
) {
351 if (yrgb_ver_scl_mode
!= SCALE_NONE
) {
352 DRM_ERROR("ERROR : not allow yrgb ver scale\n");
355 if (cbcr_ver_scl_mode
!= SCALE_NONE
) {
356 DRM_ERROR("ERROR : not allow cbcr ver scale\n");
359 vsu_mode
= SCALE_UP_BIL
;
360 } else if (lb_mode
== LB_RGB_2560X4
) {
361 vsu_mode
= SCALE_UP_BIL
;
363 vsu_mode
= SCALE_UP_BIC
;
366 val
= scl_vop_cal_scale(yrgb_hor_scl_mode
, src_w
, dst_w
,
368 VOP_SCL_SET(vop
, win
, scale_yrgb_x
, val
);
369 val
= scl_vop_cal_scale(yrgb_ver_scl_mode
, src_h
, dst_h
,
370 false, vsu_mode
, &vskiplines
);
371 VOP_SCL_SET(vop
, win
, scale_yrgb_y
, val
);
373 VOP_SCL_SET_EXT(vop
, win
, vsd_yrgb_gt4
, vskiplines
== 4);
374 VOP_SCL_SET_EXT(vop
, win
, vsd_yrgb_gt2
, vskiplines
== 2);
376 VOP_SCL_SET_EXT(vop
, win
, yrgb_hor_scl_mode
, yrgb_hor_scl_mode
);
377 VOP_SCL_SET_EXT(vop
, win
, yrgb_ver_scl_mode
, yrgb_ver_scl_mode
);
378 VOP_SCL_SET_EXT(vop
, win
, yrgb_hsd_mode
, SCALE_DOWN_BIL
);
379 VOP_SCL_SET_EXT(vop
, win
, yrgb_vsd_mode
, SCALE_DOWN_BIL
);
380 VOP_SCL_SET_EXT(vop
, win
, yrgb_vsu_mode
, vsu_mode
);
/* Chroma plane: same procedure on the subsampled dimensions. */
382 val
= scl_vop_cal_scale(cbcr_hor_scl_mode
, cbcr_src_w
,
383 dst_w
, true, 0, NULL
);
384 VOP_SCL_SET(vop
, win
, scale_cbcr_x
, val
);
385 val
= scl_vop_cal_scale(cbcr_ver_scl_mode
, cbcr_src_h
,
386 dst_h
, false, vsu_mode
, &vskiplines
);
387 VOP_SCL_SET(vop
, win
, scale_cbcr_y
, val
);
389 VOP_SCL_SET_EXT(vop
, win
, vsd_cbcr_gt4
, vskiplines
== 4);
390 VOP_SCL_SET_EXT(vop
, win
, vsd_cbcr_gt2
, vskiplines
== 2);
391 VOP_SCL_SET_EXT(vop
, win
, cbcr_hor_scl_mode
, cbcr_hor_scl_mode
);
392 VOP_SCL_SET_EXT(vop
, win
, cbcr_ver_scl_mode
, cbcr_ver_scl_mode
);
393 VOP_SCL_SET_EXT(vop
, win
, cbcr_hsd_mode
, SCALE_DOWN_BIL
);
394 VOP_SCL_SET_EXT(vop
, win
, cbcr_vsd_mode
, SCALE_DOWN_BIL
);
395 VOP_SCL_SET_EXT(vop
, win
, cbcr_vsu_mode
, vsu_mode
);
399 static void vop_dsp_hold_valid_irq_enable(struct vop
*vop
)
403 if (WARN_ON(!vop
->is_enabled
))
406 spin_lock_irqsave(&vop
->irq_lock
, flags
);
408 VOP_INTR_SET_TYPE(vop
, enable
, DSP_HOLD_VALID_INTR
, 1);
410 spin_unlock_irqrestore(&vop
->irq_lock
, flags
);
413 static void vop_dsp_hold_valid_irq_disable(struct vop
*vop
)
417 if (WARN_ON(!vop
->is_enabled
))
420 spin_lock_irqsave(&vop
->irq_lock
, flags
);
422 VOP_INTR_SET_TYPE(vop
, enable
, DSP_HOLD_VALID_INTR
, 0);
424 spin_unlock_irqrestore(&vop
->irq_lock
, flags
);
/*
 * Power-up path for the CRTC: runtime-PM get, enable hclk/dclk/aclk,
 * attach the shared IOMMU mapping, restore the full register file from
 * the shadow copy, leave standby, then enable the irq and vblanks.
 *
 * NOTE(review): the excerpt is missing the error-check `if (ret < 0)`
 * lines, the err_disable_* labels, and the closing return — the goto
 * targets referenced below unwind the clocks in reverse order.
 */
427 static void vop_enable(struct drm_crtc
*crtc
)
429 struct vop
*vop
= to_vop(crtc
);
435 ret
= pm_runtime_get_sync(vop
->dev
);
437 dev_err(vop
->dev
, "failed to get pm runtime: %d\n", ret
);
441 ret
= clk_enable(vop
->hclk
);
443 dev_err(vop
->dev
, "failed to enable hclk - %d\n", ret
);
447 ret
= clk_enable(vop
->dclk
);
449 dev_err(vop
->dev
, "failed to enable dclk - %d\n", ret
);
450 goto err_disable_hclk
;
453 ret
= clk_enable(vop
->aclk
);
455 dev_err(vop
->dev
, "failed to enable aclk - %d\n", ret
);
456 goto err_disable_dclk
;
460 * Slave iommu shares power, irq and clock with vop. It was associated
461 * automatically with this master device via common driver code.
462 * Now that we have enabled the clock we attach it to the shared drm
465 ret
= rockchip_drm_dma_attach_device(vop
->drm_dev
, vop
->dev
);
467 dev_err(vop
->dev
, "failed to attach dma mapping, %d\n", ret
);
468 goto err_disable_aclk
;
/* Replay the whole shadowed register file into the hardware. */
471 memcpy(vop
->regs
, vop
->regsbak
, vop
->len
);
473 * At here, vop clock & iommu is enable, R/W vop regs would be safe.
475 vop
->is_enabled
= true;
477 spin_lock(&vop
->reg_lock
);
479 VOP_CTRL_SET(vop
, standby
, 0);
481 spin_unlock(&vop
->reg_lock
);
483 enable_irq(vop
->irq
);
485 drm_crtc_vblank_on(crtc
);
/* Error unwind (labels lost in excerpt): aclk -> dclk -> hclk. */
490 clk_disable(vop
->aclk
);
492 clk_disable(vop
->dclk
);
494 clk_disable(vop
->hclk
);
497 static void vop_crtc_disable(struct drm_crtc
*crtc
)
499 struct vop
*vop
= to_vop(crtc
);
501 if (!vop
->is_enabled
)
504 drm_crtc_vblank_off(crtc
);
507 * Vop standby will take effect at end of current frame,
508 * if dsp hold valid irq happen, it means standby complete.
510 * we must wait standby complete when we want to disable aclk,
511 * if not, memory bus maybe dead.
513 reinit_completion(&vop
->dsp_hold_completion
);
514 vop_dsp_hold_valid_irq_enable(vop
);
516 spin_lock(&vop
->reg_lock
);
518 VOP_CTRL_SET(vop
, standby
, 1);
520 spin_unlock(&vop
->reg_lock
);
522 wait_for_completion(&vop
->dsp_hold_completion
);
524 vop_dsp_hold_valid_irq_disable(vop
);
526 disable_irq(vop
->irq
);
528 vop
->is_enabled
= false;
531 * vop standby complete, so iommu detach is safe.
533 rockchip_drm_dma_detach_device(vop
->drm_dev
, vop
->dev
);
535 clk_disable(vop
->dclk
);
536 clk_disable(vop
->aclk
);
537 clk_disable(vop
->hclk
);
538 pm_runtime_put(vop
->dev
);
/* drm_plane_funcs.destroy: release DRM core bookkeeping for @plane. */
static void vop_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
}
/*
 * Atomic check: fill the driver state's src/dest rectangles from the
 * requested plane state, clip against the CRTC mode with the helper
 * (scaling limits 1/8x..8x only on windows with a scaler), resolve the
 * hardware format, and reject YUV sources whose clipped x1 is odd.
 * Sets vop_plane_state->enable for the commit phase.
 *
 * NOTE(review): missing lines include the early "no crtc -> disable"
 * path, clip.x1/y1 init, the helper's min/max scale arguments, error
 * returns, and the !visible branch that clears `enable`.
 */
546 static int vop_plane_atomic_check(struct drm_plane
*plane
,
547 struct drm_plane_state
*state
)
549 struct drm_crtc
*crtc
= state
->crtc
;
550 struct drm_framebuffer
*fb
= state
->fb
;
551 struct vop_win
*vop_win
= to_vop_win(plane
);
552 struct vop_plane_state
*vop_plane_state
= to_vop_plane_state(state
);
553 const struct vop_win_data
*win
= vop_win
->data
;
556 struct drm_rect
*dest
= &vop_plane_state
->dest
;
557 struct drm_rect
*src
= &vop_plane_state
->src
;
558 struct drm_rect clip
;
559 int min_scale
= win
->phy
->scl
? FRAC_16_16(1, 8) :
560 DRM_PLANE_HELPER_NO_SCALING
;
561 int max_scale
= win
->phy
->scl
? FRAC_16_16(8, 1) :
562 DRM_PLANE_HELPER_NO_SCALING
;
564 crtc
= crtc
? crtc
: plane
->state
->crtc
;
566 * Both crtc or plane->state->crtc can be null.
/* src_* are 16.16 fixed point; crtc_* are integer pixels. */
570 src
->x1
= state
->src_x
;
571 src
->y1
= state
->src_y
;
572 src
->x2
= state
->src_x
+ state
->src_w
;
573 src
->y2
= state
->src_y
+ state
->src_h
;
574 dest
->x1
= state
->crtc_x
;
575 dest
->y1
= state
->crtc_y
;
576 dest
->x2
= state
->crtc_x
+ state
->crtc_w
;
577 dest
->y2
= state
->crtc_y
+ state
->crtc_h
;
581 clip
.x2
= crtc
->mode
.hdisplay
;
582 clip
.y2
= crtc
->mode
.vdisplay
;
584 ret
= drm_plane_helper_check_update(plane
, crtc
, state
->fb
,
588 true, true, &visible
);
595 vop_plane_state
->format
= vop_convert_format(fb
->pixel_format
);
596 if (vop_plane_state
->format
< 0)
597 return vop_plane_state
->format
;
600 * Src.x1 can be odd when do clip, but yuv plane start point
601 * need align with 2 pixel.
603 if (is_yuv_support(fb
->pixel_format
) && ((src
->x1
>> 16) % 2))
606 vop_plane_state
->enable
= true;
611 vop_plane_state
->enable
= false;
615 static void vop_plane_atomic_disable(struct drm_plane
*plane
,
616 struct drm_plane_state
*old_state
)
618 struct vop_plane_state
*vop_plane_state
= to_vop_plane_state(old_state
);
619 struct vop_win
*vop_win
= to_vop_win(plane
);
620 const struct vop_win_data
*win
= vop_win
->data
;
621 struct vop
*vop
= to_vop(old_state
->crtc
);
623 if (!old_state
->crtc
)
626 spin_lock(&vop
->reg_lock
);
628 VOP_WIN_SET(vop
, win
, enable
, 0);
630 spin_unlock(&vop
->reg_lock
);
632 vop_plane_state
->enable
= false;
/*
 * Atomic commit for one plane: compute geometry and DMA addresses from
 * the checked state and program the window registers (format, strides,
 * Y/RGB and UV scanout addresses, scaler factors, display position,
 * RB swap, alpha blending), then enable the window.  All register
 * writes happen under reg_lock; they latch at the next vop_cfg_done().
 *
 * NOTE(review): missing lines include early returns, declarations of
 * rb_swap/val/dma_addr, the scaler guard, and closing braces.
 */
635 static void vop_plane_atomic_update(struct drm_plane
*plane
,
636 struct drm_plane_state
*old_state
)
638 struct drm_plane_state
*state
= plane
->state
;
639 struct drm_crtc
*crtc
= state
->crtc
;
640 struct vop_win
*vop_win
= to_vop_win(plane
);
641 struct vop_plane_state
*vop_plane_state
= to_vop_plane_state(state
);
642 const struct vop_win_data
*win
= vop_win
->data
;
643 struct vop
*vop
= to_vop(state
->crtc
);
644 struct drm_framebuffer
*fb
= state
->fb
;
645 unsigned int actual_w
, actual_h
;
646 unsigned int dsp_stx
, dsp_sty
;
647 uint32_t act_info
, dsp_info
, dsp_st
;
648 struct drm_rect
*src
= &vop_plane_state
->src
;
649 struct drm_rect
*dest
= &vop_plane_state
->dest
;
650 struct drm_gem_object
*obj
, *uv_obj
;
651 struct rockchip_gem_object
*rk_obj
, *rk_uv_obj
;
652 unsigned long offset
;
658 * can't update plane when vop is disabled.
663 if (WARN_ON(!vop
->is_enabled
))
/* Check phase decided this plane is off: route to the disable path. */
666 if (!vop_plane_state
->enable
) {
667 vop_plane_atomic_disable(plane
, old_state
);
671 obj
= rockchip_fb_get_gem_obj(fb
, 0);
672 rk_obj
= to_rockchip_obj(obj
);
/* src rect is 16.16 fixed point; shift down to integer pixels. */
674 actual_w
= drm_rect_width(src
) >> 16;
675 actual_h
= drm_rect_height(src
) >> 16;
676 act_info
= (actual_h
- 1) << 16 | ((actual_w
- 1) & 0xffff);
678 dsp_info
= (drm_rect_height(dest
) - 1) << 16;
679 dsp_info
|= (drm_rect_width(dest
) - 1) & 0xffff;
/* Display start is measured from the start of the blanking period. */
681 dsp_stx
= dest
->x1
+ crtc
->mode
.htotal
- crtc
->mode
.hsync_start
;
682 dsp_sty
= dest
->y1
+ crtc
->mode
.vtotal
- crtc
->mode
.vsync_start
;
683 dsp_st
= dsp_sty
<< 16 | (dsp_stx
& 0xffff);
685 offset
= (src
->x1
>> 16) * drm_format_plane_cpp(fb
->pixel_format
, 0);
686 offset
+= (src
->y1
>> 16) * fb
->pitches
[0];
687 vop_plane_state
->yrgb_mst
= rk_obj
->dma_addr
+ offset
+ fb
->offsets
[0];
689 spin_lock(&vop
->reg_lock
);
691 VOP_WIN_SET(vop
, win
, format
, vop_plane_state
->format
);
692 VOP_WIN_SET(vop
, win
, yrgb_vir
, fb
->pitches
[0] >> 2);
693 VOP_WIN_SET(vop
, win
, yrgb_mst
, vop_plane_state
->yrgb_mst
);
694 if (is_yuv_support(fb
->pixel_format
)) {
695 int hsub
= drm_format_horz_chroma_subsampling(fb
->pixel_format
);
696 int vsub
= drm_format_vert_chroma_subsampling(fb
->pixel_format
);
697 int bpp
= drm_format_plane_cpp(fb
->pixel_format
, 1);
699 uv_obj
= rockchip_fb_get_gem_obj(fb
, 1);
700 rk_uv_obj
= to_rockchip_obj(uv_obj
);
702 offset
= (src
->x1
>> 16) * bpp
/ hsub
;
703 offset
+= (src
->y1
>> 16) * fb
->pitches
[1] / vsub
;
705 dma_addr
= rk_uv_obj
->dma_addr
+ offset
+ fb
->offsets
[1];
706 VOP_WIN_SET(vop
, win
, uv_vir
, fb
->pitches
[1] >> 2);
707 VOP_WIN_SET(vop
, win
, uv_mst
, dma_addr
);
711 scl_vop_cal_scl_fac(vop
, win
, actual_w
, actual_h
,
712 drm_rect_width(dest
), drm_rect_height(dest
),
715 VOP_WIN_SET(vop
, win
, act_info
, act_info
);
716 VOP_WIN_SET(vop
, win
, dsp_info
, dsp_info
);
717 VOP_WIN_SET(vop
, win
, dsp_st
, dsp_st
);
719 rb_swap
= has_rb_swapped(fb
->pixel_format
);
720 VOP_WIN_SET(vop
, win
, rb_swap
, rb_swap
);
722 if (is_alpha_support(fb
->pixel_format
)) {
723 VOP_WIN_SET(vop
, win
, dst_alpha_ctl
,
724 DST_FACTOR_M0(ALPHA_SRC_INVERSE
));
725 val
= SRC_ALPHA_EN(1) | SRC_COLOR_M0(ALPHA_SRC_PRE_MUL
) |
726 SRC_ALPHA_M0(ALPHA_STRAIGHT
) |
727 SRC_BLEND_M0(ALPHA_PER_PIX
) |
728 SRC_ALPHA_CAL_M0(ALPHA_NO_SATURATION
) |
729 SRC_FACTOR_M0(ALPHA_ONE
);
730 VOP_WIN_SET(vop
, win
, src_alpha_ctl
, val
);
732 VOP_WIN_SET(vop
, win
, src_alpha_ctl
, SRC_ALPHA_EN(0));
735 VOP_WIN_SET(vop
, win
, enable
, 1);
736 spin_unlock(&vop
->reg_lock
);
/* Atomic plane helper hooks wired into every VOP plane. */
739 static const struct drm_plane_helper_funcs plane_helper_funcs
= {
740 .atomic_check
= vop_plane_atomic_check
,
741 .atomic_update
= vop_plane_atomic_update
,
742 .atomic_disable
= vop_plane_atomic_disable
,
745 void vop_atomic_plane_reset(struct drm_plane
*plane
)
747 struct vop_plane_state
*vop_plane_state
=
748 to_vop_plane_state(plane
->state
);
750 if (plane
->state
&& plane
->state
->fb
)
751 drm_framebuffer_unreference(plane
->state
->fb
);
753 kfree(vop_plane_state
);
754 vop_plane_state
= kzalloc(sizeof(*vop_plane_state
), GFP_KERNEL
);
755 if (!vop_plane_state
)
758 plane
->state
= &vop_plane_state
->base
;
759 plane
->state
->plane
= plane
;
762 struct drm_plane_state
*
763 vop_atomic_plane_duplicate_state(struct drm_plane
*plane
)
765 struct vop_plane_state
*old_vop_plane_state
;
766 struct vop_plane_state
*vop_plane_state
;
768 if (WARN_ON(!plane
->state
))
771 old_vop_plane_state
= to_vop_plane_state(plane
->state
);
772 vop_plane_state
= kmemdup(old_vop_plane_state
,
773 sizeof(*vop_plane_state
), GFP_KERNEL
);
774 if (!vop_plane_state
)
777 __drm_atomic_helper_plane_duplicate_state(plane
,
778 &vop_plane_state
->base
);
780 return &vop_plane_state
->base
;
/*
 * drm_plane_funcs.atomic_destroy_state: release the references held by
 * the base state, then free the containing vop_plane_state.
 */
static void vop_atomic_plane_destroy_state(struct drm_plane *plane,
					   struct drm_plane_state *state)
{
	struct vop_plane_state *vop_state = to_vop_plane_state(state);

	__drm_atomic_helper_plane_destroy_state(plane, state);

	/*
	 * The state was allocated by reset()/duplicate_state() above
	 * (kzalloc/kmemdup); the helper only drops the base state's
	 * references, so without this kfree every atomic commit leaks
	 * one vop_plane_state.
	 */
	kfree(vop_state);
}
/*
 * Plane vtable: atomic helpers for update/disable, plus the custom
 * reset/duplicate/destroy trio that manages the subclassed
 * vop_plane_state.
 */
793 static const struct drm_plane_funcs vop_plane_funcs
= {
794 .update_plane
= drm_atomic_helper_update_plane
,
795 .disable_plane
= drm_atomic_helper_disable_plane
,
796 .destroy
= vop_plane_destroy
,
797 .reset
= vop_atomic_plane_reset
,
798 .atomic_duplicate_state
= vop_atomic_plane_duplicate_state
,
799 .atomic_destroy_state
= vop_atomic_plane_destroy_state
,
802 int rockchip_drm_crtc_mode_config(struct drm_crtc
*crtc
,
806 struct vop
*vop
= to_vop(crtc
);
808 if (WARN_ON(!vop
->is_enabled
))
811 switch (connector_type
) {
812 case DRM_MODE_CONNECTOR_LVDS
:
813 VOP_CTRL_SET(vop
, rgb_en
, 1);
815 case DRM_MODE_CONNECTOR_eDP
:
816 VOP_CTRL_SET(vop
, edp_en
, 1);
818 case DRM_MODE_CONNECTOR_HDMIA
:
819 VOP_CTRL_SET(vop
, hdmi_en
, 1);
821 case DRM_MODE_CONNECTOR_DSI
:
822 VOP_CTRL_SET(vop
, mipi_en
, 1);
825 DRM_ERROR("unsupport connector_type[%d]\n", connector_type
);
828 VOP_CTRL_SET(vop
, out_mode
, out_mode
);
832 EXPORT_SYMBOL_GPL(rockchip_drm_crtc_mode_config
);
834 static int vop_crtc_enable_vblank(struct drm_crtc
*crtc
)
836 struct vop
*vop
= to_vop(crtc
);
839 if (WARN_ON(!vop
->is_enabled
))
842 spin_lock_irqsave(&vop
->irq_lock
, flags
);
844 VOP_INTR_SET_TYPE(vop
, enable
, FS_INTR
, 1);
846 spin_unlock_irqrestore(&vop
->irq_lock
, flags
);
851 static void vop_crtc_disable_vblank(struct drm_crtc
*crtc
)
853 struct vop
*vop
= to_vop(crtc
);
856 if (WARN_ON(!vop
->is_enabled
))
859 spin_lock_irqsave(&vop
->irq_lock
, flags
);
861 VOP_INTR_SET_TYPE(vop
, enable
, FS_INTR
, 0);
863 spin_unlock_irqrestore(&vop
->irq_lock
, flags
);
866 static void vop_crtc_wait_for_update(struct drm_crtc
*crtc
)
868 struct vop
*vop
= to_vop(crtc
);
870 reinit_completion(&vop
->wait_update_complete
);
871 WARN_ON(!wait_for_completion_timeout(&vop
->wait_update_complete
, 100));
/* Rockchip-private CRTC hooks registered with the drm driver core. */
874 static const struct rockchip_crtc_funcs private_crtc_funcs
= {
875 .enable_vblank
= vop_crtc_enable_vblank
,
876 .disable_vblank
= vop_crtc_disable_vblank
,
877 .wait_for_update
= vop_crtc_wait_for_update
,
880 static bool vop_crtc_mode_fixup(struct drm_crtc
*crtc
,
881 const struct drm_display_mode
*mode
,
882 struct drm_display_mode
*adjusted_mode
)
884 struct vop
*vop
= to_vop(crtc
);
886 if (adjusted_mode
->htotal
== 0 || adjusted_mode
->vtotal
== 0)
889 adjusted_mode
->clock
=
890 clk_round_rate(vop
->dclk
, mode
->clock
* 1000) / 1000;
/*
 * Program the full display timing from the adjusted mode and start
 * scanout.  If scanout is currently running (dclk rate non-zero), the
 * VOP is first put into standby and the dsp-hold-valid interrupt is
 * awaited so that timing registers are not rewritten mid-frame
 * (RK3288 timing registers apply immediately and would tear).
 *
 * NOTE(review): missing lines include vop_enable()-call/locals, the
 * val computations for hact/vact start-end pairs, and closing braces.
 */
895 static void vop_crtc_enable(struct drm_crtc
*crtc
)
897 struct vop
*vop
= to_vop(crtc
);
898 struct drm_display_mode
*adjusted_mode
= &crtc
->state
->adjusted_mode
;
899 u16 hsync_len
= adjusted_mode
->hsync_end
- adjusted_mode
->hsync_start
;
900 u16 hdisplay
= adjusted_mode
->hdisplay
;
901 u16 htotal
= adjusted_mode
->htotal
;
902 u16 hact_st
= adjusted_mode
->htotal
- adjusted_mode
->hsync_start
;
903 u16 hact_end
= hact_st
+ hdisplay
;
904 u16 vdisplay
= adjusted_mode
->vdisplay
;
905 u16 vtotal
= adjusted_mode
->vtotal
;
906 u16 vsync_len
= adjusted_mode
->vsync_end
- adjusted_mode
->vsync_start
;
907 u16 vact_st
= adjusted_mode
->vtotal
- adjusted_mode
->vsync_start
;
908 u16 vact_end
= vact_st
+ vdisplay
;
913 * If dclk rate is zero, mean that scanout is stop,
914 * we don't need wait any more.
916 if (clk_get_rate(vop
->dclk
)) {
918 * Rk3288 vop timing register is immediately, when configure
919 * display timing on display time, may cause tearing.
921 * Vop standby will take effect at end of current frame,
922 * if dsp hold valid irq happen, it means standby complete.
925 * standby and wait complete --> |----
929 * configure display timing --> |
934 reinit_completion(&vop
->dsp_hold_completion
);
935 vop_dsp_hold_valid_irq_enable(vop
);
937 spin_lock(&vop
->reg_lock
);
939 VOP_CTRL_SET(vop
, standby
, 1);
941 spin_unlock(&vop
->reg_lock
);
943 wait_for_completion(&vop
->dsp_hold_completion
);
945 vop_dsp_hold_valid_irq_disable(vop
);
/* Sync polarity: bit 0 = hsync active high, bit 1 = vsync. */
949 val
|= (adjusted_mode
->flags
& DRM_MODE_FLAG_NHSYNC
) ? 0 : 1;
950 val
|= (adjusted_mode
->flags
& DRM_MODE_FLAG_NVSYNC
) ? 0 : (1 << 1);
951 VOP_CTRL_SET(vop
, pin_pol
, val
);
953 VOP_CTRL_SET(vop
, htotal_pw
, (htotal
<< 16) | hsync_len
);
956 VOP_CTRL_SET(vop
, hact_st_end
, val
);
957 VOP_CTRL_SET(vop
, hpost_st_end
, val
);
959 VOP_CTRL_SET(vop
, vtotal_pw
, (vtotal
<< 16) | vsync_len
);
962 VOP_CTRL_SET(vop
, vact_st_end
, val
);
963 VOP_CTRL_SET(vop
, vpost_st_end
, val
);
965 clk_set_rate(vop
->dclk
, adjusted_mode
->clock
* 1000);
967 VOP_CTRL_SET(vop
, standby
, 0);
970 static void vop_crtc_atomic_flush(struct drm_crtc
*crtc
,
971 struct drm_crtc_state
*old_crtc_state
)
973 struct vop
*vop
= to_vop(crtc
);
975 if (WARN_ON(!vop
->is_enabled
))
978 spin_lock(&vop
->reg_lock
);
982 spin_unlock(&vop
->reg_lock
);
985 static void vop_crtc_atomic_begin(struct drm_crtc
*crtc
,
986 struct drm_crtc_state
*old_crtc_state
)
988 struct vop
*vop
= to_vop(crtc
);
990 if (crtc
->state
->event
) {
991 WARN_ON(drm_crtc_vblank_get(crtc
) != 0);
993 vop
->event
= crtc
->state
->event
;
994 crtc
->state
->event
= NULL
;
/* Atomic CRTC helper hooks: enable/disable, mode fixup, begin/flush. */
998 static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs
= {
999 .enable
= vop_crtc_enable
,
1000 .disable
= vop_crtc_disable
,
1001 .mode_fixup
= vop_crtc_mode_fixup
,
1002 .atomic_flush
= vop_crtc_atomic_flush
,
1003 .atomic_begin
= vop_crtc_atomic_begin
,
/* drm_crtc_funcs.destroy: release DRM core bookkeeping for @crtc. */
static void vop_crtc_destroy(struct drm_crtc *crtc)
{
	drm_crtc_cleanup(crtc);
}
/* CRTC vtable: stock atomic helpers plus the local destroy hook. */
1011 static const struct drm_crtc_funcs vop_crtc_funcs
= {
1012 .set_config
= drm_atomic_helper_set_config
,
1013 .page_flip
= drm_atomic_helper_page_flip
,
1014 .destroy
= vop_crtc_destroy
,
1015 .reset
= drm_atomic_helper_crtc_reset
,
1016 .atomic_duplicate_state
= drm_atomic_helper_crtc_duplicate_state
,
1017 .atomic_destroy_state
= drm_atomic_helper_crtc_destroy_state
,
1020 static bool vop_win_pending_is_complete(struct vop_win
*vop_win
)
1022 struct drm_plane
*plane
= &vop_win
->base
;
1023 struct vop_plane_state
*state
= to_vop_plane_state(plane
->state
);
1024 dma_addr_t yrgb_mst
;
1027 return VOP_WIN_GET(vop_win
->vop
, vop_win
->data
, enable
) == 0;
1029 yrgb_mst
= VOP_WIN_GET_YRGBADDR(vop_win
->vop
, vop_win
->data
);
1031 return yrgb_mst
== state
->yrgb_mst
;
1034 static void vop_handle_vblank(struct vop
*vop
)
1036 struct drm_device
*drm
= vop
->drm_dev
;
1037 struct drm_crtc
*crtc
= &vop
->crtc
;
1038 unsigned long flags
;
1041 for (i
= 0; i
< vop
->data
->win_size
; i
++) {
1042 if (!vop_win_pending_is_complete(&vop
->win
[i
]))
1047 spin_lock_irqsave(&drm
->event_lock
, flags
);
1049 drm_crtc_send_vblank_event(crtc
, vop
->event
);
1050 drm_crtc_vblank_put(crtc
);
1053 spin_unlock_irqrestore(&drm
->event_lock
, flags
);
1055 if (!completion_done(&vop
->wait_update_complete
))
1056 complete(&vop
->wait_update_complete
);
1059 static irqreturn_t
vop_isr(int irq
, void *data
)
1061 struct vop
*vop
= data
;
1062 struct drm_crtc
*crtc
= &vop
->crtc
;
1063 uint32_t active_irqs
;
1064 unsigned long flags
;
1068 * interrupt register has interrupt status, enable and clear bits, we
1069 * must hold irq_lock to avoid a race with enable/disable_vblank().
1071 spin_lock_irqsave(&vop
->irq_lock
, flags
);
1073 active_irqs
= VOP_INTR_GET_TYPE(vop
, status
, INTR_MASK
);
1074 /* Clear all active interrupt sources */
1076 VOP_INTR_SET_TYPE(vop
, clear
, active_irqs
, 1);
1078 spin_unlock_irqrestore(&vop
->irq_lock
, flags
);
1080 /* This is expected for vop iommu irqs, since the irq is shared */
1084 if (active_irqs
& DSP_HOLD_VALID_INTR
) {
1085 complete(&vop
->dsp_hold_completion
);
1086 active_irqs
&= ~DSP_HOLD_VALID_INTR
;
1090 if (active_irqs
& FS_INTR
) {
1091 drm_crtc_handle_vblank(crtc
);
1092 vop_handle_vblank(vop
);
1093 active_irqs
&= ~FS_INTR
;
1097 /* Unhandled irqs are spurious. */
1099 DRM_ERROR("Unknown VOP IRQs: %#02x\n", active_irqs
);
/*
 * Register the CRTC and all its planes with DRM: primary/cursor planes
 * first (drm_crtc_init_with_planes needs them), then the CRTC, then
 * the overlay planes restricted to this CRTC, the OF "port" node, the
 * completions and the private crtc funcs.
 *
 * NOTE(review): missing lines include primary/cursor assignment bodies,
 * error checks after init calls, crtc->port assignment, the success
 * return, and the error-label lines themselves.
 */
1104 static int vop_create_crtc(struct vop
*vop
)
1106 const struct vop_data
*vop_data
= vop
->data
;
1107 struct device
*dev
= vop
->dev
;
1108 struct drm_device
*drm_dev
= vop
->drm_dev
;
1109 struct drm_plane
*primary
= NULL
, *cursor
= NULL
, *plane
;
1110 struct drm_crtc
*crtc
= &vop
->crtc
;
1111 struct device_node
*port
;
1116 * Create drm_plane for primary and cursor planes first, since we need
1117 * to pass them to drm_crtc_init_with_planes, which sets the
1118 * "possible_crtcs" to the newly initialized crtc.
1120 for (i
= 0; i
< vop_data
->win_size
; i
++) {
1121 struct vop_win
*vop_win
= &vop
->win
[i
];
1122 const struct vop_win_data
*win_data
= vop_win
->data
;
1124 if (win_data
->type
!= DRM_PLANE_TYPE_PRIMARY
&&
1125 win_data
->type
!= DRM_PLANE_TYPE_CURSOR
)
1128 ret
= drm_universal_plane_init(vop
->drm_dev
, &vop_win
->base
,
1129 0, &vop_plane_funcs
,
1130 win_data
->phy
->data_formats
,
1131 win_data
->phy
->nformats
,
1132 win_data
->type
, NULL
);
1134 DRM_ERROR("failed to initialize plane\n");
1135 goto err_cleanup_planes
;
1138 plane
= &vop_win
->base
;
1139 drm_plane_helper_add(plane
, &plane_helper_funcs
);
1140 if (plane
->type
== DRM_PLANE_TYPE_PRIMARY
)
1142 else if (plane
->type
== DRM_PLANE_TYPE_CURSOR
)
1146 ret
= drm_crtc_init_with_planes(drm_dev
, crtc
, primary
, cursor
,
1147 &vop_crtc_funcs
, NULL
);
1151 drm_crtc_helper_add(crtc
, &vop_crtc_helper_funcs
);
1154 * Create drm_planes for overlay windows with possible_crtcs restricted
1155 * to the newly created crtc.
1157 for (i
= 0; i
< vop_data
->win_size
; i
++) {
1158 struct vop_win
*vop_win
= &vop
->win
[i
];
1159 const struct vop_win_data
*win_data
= vop_win
->data
;
1160 unsigned long possible_crtcs
= 1 << drm_crtc_index(crtc
);
1162 if (win_data
->type
!= DRM_PLANE_TYPE_OVERLAY
)
1165 ret
= drm_universal_plane_init(vop
->drm_dev
, &vop_win
->base
,
1168 win_data
->phy
->data_formats
,
1169 win_data
->phy
->nformats
,
1170 win_data
->type
, NULL
);
1172 DRM_ERROR("failed to initialize overlay plane\n");
1173 goto err_cleanup_crtc
;
1175 drm_plane_helper_add(&vop_win
->base
, &plane_helper_funcs
);
/* The OF graph port links this CRTC to its downstream encoders. */
1178 port
= of_get_child_by_name(dev
->of_node
, "port");
1180 DRM_ERROR("no port node found in %s\n",
1181 dev
->of_node
->full_name
);
1182 goto err_cleanup_crtc
;
1185 init_completion(&vop
->dsp_hold_completion
);
1186 init_completion(&vop
->wait_update_complete
);
1188 rockchip_register_crtc_funcs(crtc
, &private_crtc_funcs
);
/* Error unwind (labels lost in excerpt): crtc, then every plane. */
1193 drm_crtc_cleanup(crtc
);
1195 list_for_each_entry(plane
, &drm_dev
->mode_config
.plane_list
, head
)
1196 drm_plane_cleanup(plane
);
1200 static void vop_destroy_crtc(struct vop
*vop
)
1202 struct drm_crtc
*crtc
= &vop
->crtc
;
1204 rockchip_unregister_crtc_funcs(crtc
);
1205 of_node_put(crtc
->port
);
1206 drm_crtc_cleanup(crtc
);
/*
 * One-time hardware bring-up at bind: acquire hclk/aclk/dclk, prepare
 * and enable the bus clocks, pulse the AHB reset to clear all
 * registers, seed the shadow copy from hardware, apply the per-chip
 * init table, disable every window, then pulse the dclk reset to latch
 * the configuration.  Leaves the clocks prepared but gated and the VOP
 * marked disabled.
 *
 * NOTE(review): missing lines include `int ret, i;`-style declarations,
 * the `if (ret < 0)` checks, the success return, and the error-label
 * lines (err_disable_aclk/err_disable_hclk/err_unprepare_dclk).
 */
1209 static int vop_initial(struct vop
*vop
)
1211 const struct vop_data
*vop_data
= vop
->data
;
1212 const struct vop_reg_data
*init_table
= vop_data
->init_table
;
1213 struct reset_control
*ahb_rst
;
1216 vop
->hclk
= devm_clk_get(vop
->dev
, "hclk_vop");
1217 if (IS_ERR(vop
->hclk
)) {
1218 dev_err(vop
->dev
, "failed to get hclk source\n");
1219 return PTR_ERR(vop
->hclk
);
1221 vop
->aclk
= devm_clk_get(vop
->dev
, "aclk_vop");
1222 if (IS_ERR(vop
->aclk
)) {
1223 dev_err(vop
->dev
, "failed to get aclk source\n");
1224 return PTR_ERR(vop
->aclk
);
1226 vop
->dclk
= devm_clk_get(vop
->dev
, "dclk_vop");
1227 if (IS_ERR(vop
->dclk
)) {
1228 dev_err(vop
->dev
, "failed to get dclk source\n");
1229 return PTR_ERR(vop
->dclk
);
1232 ret
= clk_prepare(vop
->dclk
);
1234 dev_err(vop
->dev
, "failed to prepare dclk\n");
1238 /* Enable both the hclk and aclk to setup the vop */
1239 ret
= clk_prepare_enable(vop
->hclk
);
1241 dev_err(vop
->dev
, "failed to prepare/enable hclk\n");
1242 goto err_unprepare_dclk
;
1245 ret
= clk_prepare_enable(vop
->aclk
);
1247 dev_err(vop
->dev
, "failed to prepare/enable aclk\n");
1248 goto err_disable_hclk
;
1252 * do hclk_reset, reset all vop registers.
1254 ahb_rst
= devm_reset_control_get(vop
->dev
, "ahb");
1255 if (IS_ERR(ahb_rst
)) {
1256 dev_err(vop
->dev
, "failed to get ahb reset\n");
1257 ret
= PTR_ERR(ahb_rst
);
1258 goto err_disable_aclk
;
1260 reset_control_assert(ahb_rst
);
1261 usleep_range(10, 20);
1262 reset_control_deassert(ahb_rst
);
/* Seed the shadow register file from post-reset hardware state. */
1264 memcpy(vop
->regsbak
, vop
->regs
, vop
->len
);
1266 for (i
= 0; i
< vop_data
->table_size
; i
++)
1267 vop_writel(vop
, init_table
[i
].offset
, init_table
[i
].value
);
1269 for (i
= 0; i
< vop_data
->win_size
; i
++) {
1270 const struct vop_win_data
*win
= &vop_data
->win
[i
];
1272 VOP_WIN_SET(vop
, win
, enable
, 0);
1278 * do dclk_reset, let all config take affect.
1280 vop
->dclk_rst
= devm_reset_control_get(vop
->dev
, "dclk");
1281 if (IS_ERR(vop
->dclk_rst
)) {
1282 dev_err(vop
->dev
, "failed to get dclk reset\n");
1283 ret
= PTR_ERR(vop
->dclk_rst
);
1284 goto err_disable_aclk
;
1286 reset_control_assert(vop
->dclk_rst
);
1287 usleep_range(10, 20);
1288 reset_control_deassert(vop
->dclk_rst
);
/* Keep the clocks prepared but gated until vop_enable(). */
1290 clk_disable(vop
->hclk
);
1291 clk_disable(vop
->aclk
);
1293 vop
->is_enabled
= false;
/* Error unwind (labels lost in excerpt). */
1298 clk_disable_unprepare(vop
->aclk
);
1300 clk_disable_unprepare(vop
->hclk
);
1302 clk_unprepare(vop
->dclk
);
1307 * Initialize the vop->win array elements.
1309 static void vop_win_init(struct vop
*vop
)
1311 const struct vop_data
*vop_data
= vop
->data
;
1314 for (i
= 0; i
< vop_data
->win_size
; i
++) {
1315 struct vop_win
*vop_win
= &vop
->win
[i
];
1316 const struct vop_win_data
*win_data
= &vop_data
->win
[i
];
1318 vop_win
->data
= win_data
;
/*
 * Component bind: match per-chip data, allocate the vop (with its
 * trailing vop_win array), map the register resource, allocate the
 * shadow copy, run vop_initial(), hook up the (initially disabled)
 * shared irq, create the CRTC/planes, and enable runtime PM.
 *
 * NOTE(review): missing lines include local declarations, NULL/err
 * checks after the allocations and vop_data lookup, vop->dev
 * assignment, the vop_win_init() call, and the final return.
 */
1323 static int vop_bind(struct device
*dev
, struct device
*master
, void *data
)
1325 struct platform_device
*pdev
= to_platform_device(dev
);
1326 const struct vop_data
*vop_data
;
1327 struct drm_device
*drm_dev
= data
;
1329 struct resource
*res
;
1333 vop_data
= of_device_get_match_data(dev
);
1337 /* Allocate vop struct and its vop_win array */
1338 alloc_size
= sizeof(*vop
) + sizeof(*vop
->win
) * vop_data
->win_size
;
1339 vop
= devm_kzalloc(dev
, alloc_size
, GFP_KERNEL
);
1344 vop
->data
= vop_data
;
1345 vop
->drm_dev
= drm_dev
;
1346 dev_set_drvdata(dev
, vop
);
1350 res
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
1351 vop
->len
= resource_size(res
);
1352 vop
->regs
= devm_ioremap_resource(dev
, res
);
1353 if (IS_ERR(vop
->regs
))
1354 return PTR_ERR(vop
->regs
);
1356 vop
->regsbak
= devm_kzalloc(dev
, vop
->len
, GFP_KERNEL
);
1360 ret
= vop_initial(vop
);
1362 dev_err(&pdev
->dev
, "cannot initial vop dev - err %d\n", ret
);
1366 irq
= platform_get_irq(pdev
, 0);
1368 dev_err(dev
, "cannot find irq for vop\n");
1371 vop
->irq
= (unsigned int)irq
;
1373 spin_lock_init(&vop
->reg_lock
);
1374 spin_lock_init(&vop
->irq_lock
);
1376 mutex_init(&vop
->vsync_mutex
);
1378 ret
= devm_request_irq(dev
, vop
->irq
, vop_isr
,
1379 IRQF_SHARED
, dev_name(dev
), vop
);
1383 /* IRQ is initially disabled; it gets enabled in power_on */
1384 disable_irq(vop
->irq
);
1386 ret
= vop_create_crtc(vop
);
1390 pm_runtime_enable(&pdev
->dev
);
/* Component unbind: stop runtime PM and undo vop_create_crtc(). */
static void vop_unbind(struct device *dev, struct device *master, void *data)
{
	struct vop *vop = dev_get_drvdata(dev);

	pm_runtime_disable(dev);
	vop_destroy_crtc(vop);
}
1402 const struct component_ops vop_component_ops
= {
1404 .unbind
= vop_unbind
,
1406 EXPORT_SYMBOL_GPL(vop_component_ops
);