drm/rockchip: vop: restore vop registers when resume
drivers/gpu/drm/rockchip/rockchip_drm_vop.c (deliverable/linux.git)
1 /*
2 * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
3 * Author:Mark Yao <mark.yao@rock-chips.com>
4 *
5 * This software is licensed under the terms of the GNU General Public
6 * License version 2, as published by the Free Software Foundation, and
7 * may be copied, distributed, and modified under those terms.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15 #include <drm/drm.h>
16 #include <drm/drmP.h>
17 #include <drm/drm_crtc.h>
18 #include <drm/drm_crtc_helper.h>
19 #include <drm/drm_plane_helper.h>
20
21 #include <linux/kernel.h>
22 #include <linux/module.h>
23 #include <linux/platform_device.h>
24 #include <linux/clk.h>
25 #include <linux/of.h>
26 #include <linux/of_device.h>
27 #include <linux/pm_runtime.h>
28 #include <linux/component.h>
29
30 #include <linux/reset.h>
31 #include <linux/delay.h>
32
33 #include "rockchip_drm_drv.h"
34 #include "rockchip_drm_gem.h"
35 #include "rockchip_drm_fb.h"
36 #include "rockchip_drm_vop.h"
37
38 #define VOP_REG(off, _mask, s) \
39 {.offset = off, \
40 .mask = _mask, \
41 .shift = s,}
42
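/*
 * The __REG_SET_* helpers below write a register field through
 * vop_mask_write*(), which merges the new value into the software copy in
 * vop->regsbak and then writes the merged word to the hardware. RELAXED
 * uses writel_relaxed() for bulk window programming, NORMAL uses writel().
 */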
43 #define __REG_SET_RELAXED(x, off, mask, shift, v) \
44 vop_mask_write_relaxed(x, off, (mask) << shift, (v) << shift)
45 #define __REG_SET_NORMAL(x, off, mask, shift, v) \
46 vop_mask_write(x, off, (mask) << shift, (v) << shift)
47
48 #define REG_SET(x, base, reg, v, mode) \
49 __REG_SET_##mode(x, base + reg.offset, reg.mask, reg.shift, v)
50
51 #define VOP_WIN_SET(x, win, name, v) \
52 REG_SET(x, win->base, win->phy->name, v, RELAXED)
53 #define VOP_CTRL_SET(x, name, v) \
54 REG_SET(x, 0, (x)->data->ctrl->name, v, NORMAL)
55
56 #define VOP_WIN_GET(x, win, name) \
57 vop_read_reg(x, win->base, &win->phy->name)
58
59 #define VOP_WIN_GET_YRGBADDR(vop, win) \
60 vop_readl(vop, win->base + win->phy->yrgb_mst.offset)
61
62 #define to_vop(x) container_of(x, struct vop, crtc)
63 #define to_vop_win(x) container_of(x, struct vop_win, base)
64
65 struct vop_win_state {
66 struct list_head head;
67 struct drm_framebuffer *fb;
68 dma_addr_t yrgb_mst;
69 struct drm_pending_vblank_event *event;
70 };
71
72 struct vop_win {
73 struct drm_plane base;
74 const struct vop_win_data *data;
75 struct vop *vop;
76
77 struct list_head pending;
78 struct vop_win_state *active;
79 };
80
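/*
 * Per-CRTC driver state. regsbak holds a software copy of the whole
 * register file: masked writes are merged into it first, and it is copied
 * back to the hardware in vop_enable(), so register contents survive a
 * power-down/resume cycle.
 */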
81 struct vop {
82 struct drm_crtc crtc;
83 struct device *dev;
84 struct drm_device *drm_dev;
85 bool is_enabled;
86
87 int connector_type;
88 int connector_out_mode;
89
 90         /* protects the vsync work state below */
91 struct mutex vsync_mutex;
92 bool vsync_work_pending;
93 struct completion dsp_hold_completion;
94
95 const struct vop_data *data;
96
97 uint32_t *regsbak;
98 void __iomem *regs;
99
 100         /* length of the mapped vop register region */
101 uint32_t len;
102
 103         /* only one process may configure the registers at a time */
104 spinlock_t reg_lock;
105 /* lock vop irq reg */
106 spinlock_t irq_lock;
107
108 unsigned int irq;
109
 110         /* vop AHB clk */
111 struct clk *hclk;
112 /* vop dclk */
113 struct clk *dclk;
 114         /* vop axi (shared memory) clk */
115 struct clk *aclk;
116
117 /* vop dclk reset */
118 struct reset_control *dclk_rst;
119
120 int pipe;
121
122 struct vop_win win[];
123 };
124
125 enum vop_data_format {
126 VOP_FMT_ARGB8888 = 0,
127 VOP_FMT_RGB888,
128 VOP_FMT_RGB565,
129 VOP_FMT_YUV420SP = 4,
130 VOP_FMT_YUV422SP,
131 VOP_FMT_YUV444SP,
132 };
133
134 struct vop_reg_data {
135 uint32_t offset;
136 uint32_t value;
137 };
138
139 struct vop_reg {
140 uint32_t offset;
141 uint32_t shift;
142 uint32_t mask;
143 };
144
145 struct vop_ctrl {
146 struct vop_reg standby;
147 struct vop_reg data_blank;
148 struct vop_reg gate_en;
149 struct vop_reg mmu_en;
150 struct vop_reg rgb_en;
151 struct vop_reg edp_en;
152 struct vop_reg hdmi_en;
153 struct vop_reg mipi_en;
154 struct vop_reg out_mode;
155 struct vop_reg dither_down;
156 struct vop_reg dither_up;
157 struct vop_reg pin_pol;
158
159 struct vop_reg htotal_pw;
160 struct vop_reg hact_st_end;
161 struct vop_reg vtotal_pw;
162 struct vop_reg vact_st_end;
163 struct vop_reg hpost_st_end;
164 struct vop_reg vpost_st_end;
165 };
166
167 struct vop_win_phy {
168 const uint32_t *data_formats;
169 uint32_t nformats;
170
171 struct vop_reg enable;
172 struct vop_reg format;
173 struct vop_reg rb_swap;
174 struct vop_reg act_info;
175 struct vop_reg dsp_info;
176 struct vop_reg dsp_st;
177 struct vop_reg yrgb_mst;
178 struct vop_reg uv_mst;
179 struct vop_reg yrgb_vir;
180 struct vop_reg uv_vir;
181
182 struct vop_reg dst_alpha_ctl;
183 struct vop_reg src_alpha_ctl;
184 };
185
186 struct vop_win_data {
187 uint32_t base;
188 const struct vop_win_phy *phy;
189 enum drm_plane_type type;
190 };
191
192 struct vop_data {
193 const struct vop_reg_data *init_table;
194 unsigned int table_size;
195 const struct vop_ctrl *ctrl;
196 const struct vop_win_data *win;
197 unsigned int win_size;
198 };
199
200 static const uint32_t formats_01[] = {
201 DRM_FORMAT_XRGB8888,
202 DRM_FORMAT_ARGB8888,
203 DRM_FORMAT_XBGR8888,
204 DRM_FORMAT_ABGR8888,
205 DRM_FORMAT_RGB888,
206 DRM_FORMAT_BGR888,
207 DRM_FORMAT_RGB565,
208 DRM_FORMAT_BGR565,
209 DRM_FORMAT_NV12,
210 DRM_FORMAT_NV16,
211 DRM_FORMAT_NV24,
212 };
213
214 static const uint32_t formats_234[] = {
215 DRM_FORMAT_XRGB8888,
216 DRM_FORMAT_ARGB8888,
217 DRM_FORMAT_XBGR8888,
218 DRM_FORMAT_ABGR8888,
219 DRM_FORMAT_RGB888,
220 DRM_FORMAT_BGR888,
221 DRM_FORMAT_RGB565,
222 DRM_FORMAT_BGR565,
223 };
224
225 static const struct vop_win_phy win01_data = {
226 .data_formats = formats_01,
227 .nformats = ARRAY_SIZE(formats_01),
228 .enable = VOP_REG(WIN0_CTRL0, 0x1, 0),
229 .format = VOP_REG(WIN0_CTRL0, 0x7, 1),
230 .rb_swap = VOP_REG(WIN0_CTRL0, 0x1, 12),
231 .act_info = VOP_REG(WIN0_ACT_INFO, 0x1fff1fff, 0),
232 .dsp_info = VOP_REG(WIN0_DSP_INFO, 0x0fff0fff, 0),
233 .dsp_st = VOP_REG(WIN0_DSP_ST, 0x1fff1fff, 0),
234 .yrgb_mst = VOP_REG(WIN0_YRGB_MST, 0xffffffff, 0),
235 .uv_mst = VOP_REG(WIN0_CBR_MST, 0xffffffff, 0),
236 .yrgb_vir = VOP_REG(WIN0_VIR, 0x3fff, 0),
237 .uv_vir = VOP_REG(WIN0_VIR, 0x3fff, 16),
238 .src_alpha_ctl = VOP_REG(WIN0_SRC_ALPHA_CTRL, 0xff, 0),
239 .dst_alpha_ctl = VOP_REG(WIN0_DST_ALPHA_CTRL, 0xff, 0),
240 };
241
242 static const struct vop_win_phy win23_data = {
243 .data_formats = formats_234,
244 .nformats = ARRAY_SIZE(formats_234),
245 .enable = VOP_REG(WIN2_CTRL0, 0x1, 0),
246 .format = VOP_REG(WIN2_CTRL0, 0x7, 1),
247 .rb_swap = VOP_REG(WIN2_CTRL0, 0x1, 12),
248 .dsp_info = VOP_REG(WIN2_DSP_INFO0, 0x0fff0fff, 0),
249 .dsp_st = VOP_REG(WIN2_DSP_ST0, 0x1fff1fff, 0),
250 .yrgb_mst = VOP_REG(WIN2_MST0, 0xffffffff, 0),
251 .yrgb_vir = VOP_REG(WIN2_VIR0_1, 0x1fff, 0),
252 .src_alpha_ctl = VOP_REG(WIN2_SRC_ALPHA_CTRL, 0xff, 0),
253 .dst_alpha_ctl = VOP_REG(WIN2_DST_ALPHA_CTRL, 0xff, 0),
254 };
255
256 static const struct vop_ctrl ctrl_data = {
257 .standby = VOP_REG(SYS_CTRL, 0x1, 22),
258 .gate_en = VOP_REG(SYS_CTRL, 0x1, 23),
259 .mmu_en = VOP_REG(SYS_CTRL, 0x1, 20),
260 .rgb_en = VOP_REG(SYS_CTRL, 0x1, 12),
261 .hdmi_en = VOP_REG(SYS_CTRL, 0x1, 13),
262 .edp_en = VOP_REG(SYS_CTRL, 0x1, 14),
263 .mipi_en = VOP_REG(SYS_CTRL, 0x1, 15),
264 .dither_down = VOP_REG(DSP_CTRL1, 0xf, 1),
265 .dither_up = VOP_REG(DSP_CTRL1, 0x1, 6),
266 .data_blank = VOP_REG(DSP_CTRL0, 0x1, 19),
267 .out_mode = VOP_REG(DSP_CTRL0, 0xf, 0),
268 .pin_pol = VOP_REG(DSP_CTRL0, 0xf, 4),
269 .htotal_pw = VOP_REG(DSP_HTOTAL_HS_END, 0x1fff1fff, 0),
270 .hact_st_end = VOP_REG(DSP_HACT_ST_END, 0x1fff1fff, 0),
271 .vtotal_pw = VOP_REG(DSP_VTOTAL_VS_END, 0x1fff1fff, 0),
272 .vact_st_end = VOP_REG(DSP_VACT_ST_END, 0x1fff1fff, 0),
273 .hpost_st_end = VOP_REG(POST_DSP_HACT_INFO, 0x1fff1fff, 0),
274 .vpost_st_end = VOP_REG(POST_DSP_VACT_INFO, 0x1fff1fff, 0),
275 };
276
277 static const struct vop_reg_data vop_init_reg_table[] = {
278 {SYS_CTRL, 0x00c00000},
279 {DSP_CTRL0, 0x00000000},
280 {WIN0_CTRL0, 0x00000080},
281 {WIN1_CTRL0, 0x00000080},
 282         /* TODO: Win2/3 support a multi-area feature, but we haven't found
 283          * a suitable way to use it yet, so just use them like the other
 284          * windows, with only area 0 enabled.
 285          */
286 {WIN2_CTRL0, 0x00000010},
287 {WIN3_CTRL0, 0x00000010},
288 };
289
290 /*
291 * Note: rk3288 has a dedicated 'cursor' window, however, that window requires
292 * special support to get alpha blending working. For now, just use overlay
293 * window 3 for the drm cursor.
294 *
295 */
296 static const struct vop_win_data rk3288_vop_win_data[] = {
297 { .base = 0x00, .phy = &win01_data, .type = DRM_PLANE_TYPE_PRIMARY },
298 { .base = 0x40, .phy = &win01_data, .type = DRM_PLANE_TYPE_OVERLAY },
299 { .base = 0x00, .phy = &win23_data, .type = DRM_PLANE_TYPE_OVERLAY },
300 { .base = 0x50, .phy = &win23_data, .type = DRM_PLANE_TYPE_CURSOR },
301 };
302
303 static const struct vop_data rk3288_vop = {
304 .init_table = vop_init_reg_table,
305 .table_size = ARRAY_SIZE(vop_init_reg_table),
306 .ctrl = &ctrl_data,
307 .win = rk3288_vop_win_data,
308 .win_size = ARRAY_SIZE(rk3288_vop_win_data),
309 };
310
311 static const struct of_device_id vop_driver_dt_match[] = {
312 { .compatible = "rockchip,rk3288-vop",
313 .data = &rk3288_vop },
314 {},
315 };
316
317 static inline void vop_writel(struct vop *vop, uint32_t offset, uint32_t v)
318 {
319 writel(v, vop->regs + offset);
320 vop->regsbak[offset >> 2] = v;
321 }
322
323 static inline uint32_t vop_readl(struct vop *vop, uint32_t offset)
324 {
325 return readl(vop->regs + offset);
326 }
327
328 static inline uint32_t vop_read_reg(struct vop *vop, uint32_t base,
329 const struct vop_reg *reg)
330 {
331 return (vop_readl(vop, base + reg->offset) >> reg->shift) & reg->mask;
332 }
333
334 static inline void vop_cfg_done(struct vop *vop)
335 {
336 writel(0x01, vop->regs + REG_CFG_DONE);
337 }
338
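/*
 * Read-modify-write through the cached copy in regsbak: callers never read
 * the hardware back, so the cache always reflects the last value written to
 * each register.
 */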
339 static inline void vop_mask_write(struct vop *vop, uint32_t offset,
340 uint32_t mask, uint32_t v)
341 {
342 if (mask) {
343 uint32_t cached_val = vop->regsbak[offset >> 2];
344
345 cached_val = (cached_val & ~mask) | v;
346 writel(cached_val, vop->regs + offset);
347 vop->regsbak[offset >> 2] = cached_val;
348 }
349 }
350
351 static inline void vop_mask_write_relaxed(struct vop *vop, uint32_t offset,
352 uint32_t mask, uint32_t v)
353 {
354 if (mask) {
355 uint32_t cached_val = vop->regsbak[offset >> 2];
356
357 cached_val = (cached_val & ~mask) | v;
358 writel_relaxed(cached_val, vop->regs + offset);
359 vop->regsbak[offset >> 2] = cached_val;
360 }
361 }
362
363 static bool has_rb_swapped(uint32_t format)
364 {
365 switch (format) {
366 case DRM_FORMAT_XBGR8888:
367 case DRM_FORMAT_ABGR8888:
368 case DRM_FORMAT_BGR888:
369 case DRM_FORMAT_BGR565:
370 return true;
371 default:
372 return false;
373 }
374 }
375
376 static enum vop_data_format vop_convert_format(uint32_t format)
377 {
378 switch (format) {
379 case DRM_FORMAT_XRGB8888:
380 case DRM_FORMAT_ARGB8888:
381 case DRM_FORMAT_XBGR8888:
382 case DRM_FORMAT_ABGR8888:
383 return VOP_FMT_ARGB8888;
384 case DRM_FORMAT_RGB888:
385 case DRM_FORMAT_BGR888:
386 return VOP_FMT_RGB888;
387 case DRM_FORMAT_RGB565:
388 case DRM_FORMAT_BGR565:
389 return VOP_FMT_RGB565;
390 case DRM_FORMAT_NV12:
391 return VOP_FMT_YUV420SP;
392 case DRM_FORMAT_NV16:
393 return VOP_FMT_YUV422SP;
394 case DRM_FORMAT_NV24:
395 return VOP_FMT_YUV444SP;
396 default:
 397                 DRM_ERROR("unsupported format[%08x]\n", format);
398 return -EINVAL;
399 }
400 }
401
402 static bool is_yuv_support(uint32_t format)
403 {
404 switch (format) {
405 case DRM_FORMAT_NV12:
406 case DRM_FORMAT_NV16:
407 case DRM_FORMAT_NV24:
408 return true;
409 default:
410 return false;
411 }
412 }
413
414 static bool is_alpha_support(uint32_t format)
415 {
416 switch (format) {
417 case DRM_FORMAT_ARGB8888:
418 case DRM_FORMAT_ABGR8888:
419 return true;
420 default:
421 return false;
422 }
423 }
424
425 static void vop_dsp_hold_valid_irq_enable(struct vop *vop)
426 {
427 unsigned long flags;
428
429 if (WARN_ON(!vop->is_enabled))
430 return;
431
432 spin_lock_irqsave(&vop->irq_lock, flags);
433
434 vop_mask_write(vop, INTR_CTRL0, DSP_HOLD_VALID_INTR_MASK,
435 DSP_HOLD_VALID_INTR_EN(1));
436
437 spin_unlock_irqrestore(&vop->irq_lock, flags);
438 }
439
440 static void vop_dsp_hold_valid_irq_disable(struct vop *vop)
441 {
442 unsigned long flags;
443
444 if (WARN_ON(!vop->is_enabled))
445 return;
446
447 spin_lock_irqsave(&vop->irq_lock, flags);
448
449 vop_mask_write(vop, INTR_CTRL0, DSP_HOLD_VALID_INTR_MASK,
450 DSP_HOLD_VALID_INTR_EN(0));
451
452 spin_unlock_irqrestore(&vop->irq_lock, flags);
453 }
454
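/*
 * Power up the CRTC: runtime-resume the device, enable hclk/dclk/aclk,
 * attach the shared iommu mapping, then replay the saved register contents
 * from regsbak into the hardware and leave standby.
 */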
455 static void vop_enable(struct drm_crtc *crtc)
456 {
457 struct vop *vop = to_vop(crtc);
458 int ret;
459
460 if (vop->is_enabled)
461 return;
462
463 ret = pm_runtime_get_sync(vop->dev);
464 if (ret < 0) {
465 dev_err(vop->dev, "failed to get pm runtime: %d\n", ret);
466 return;
467 }
468
469 ret = clk_enable(vop->hclk);
470 if (ret < 0) {
471 dev_err(vop->dev, "failed to enable hclk - %d\n", ret);
472 return;
473 }
474
475 ret = clk_enable(vop->dclk);
476 if (ret < 0) {
477 dev_err(vop->dev, "failed to enable dclk - %d\n", ret);
478 goto err_disable_hclk;
479 }
480
481 ret = clk_enable(vop->aclk);
482 if (ret < 0) {
483 dev_err(vop->dev, "failed to enable aclk - %d\n", ret);
484 goto err_disable_dclk;
485 }
486
487 /*
488 * Slave iommu shares power, irq and clock with vop. It was associated
489 * automatically with this master device via common driver code.
490 * Now that we have enabled the clock we attach it to the shared drm
491 * mapping.
492 */
493 ret = rockchip_drm_dma_attach_device(vop->drm_dev, vop->dev);
494 if (ret) {
495 dev_err(vop->dev, "failed to attach dma mapping, %d\n", ret);
496 goto err_disable_aclk;
497 }
498
499 memcpy(vop->regs, vop->regsbak, vop->len);
 500         /*
 501          * The vop clocks and iommu are now enabled; it is safe to access the regs.
 502          */
503 vop->is_enabled = true;
504
505 spin_lock(&vop->reg_lock);
506
507 VOP_CTRL_SET(vop, standby, 0);
508
509 spin_unlock(&vop->reg_lock);
510
511 enable_irq(vop->irq);
512
513 drm_vblank_on(vop->drm_dev, vop->pipe);
514
515 return;
516
517 err_disable_aclk:
518 clk_disable(vop->aclk);
519 err_disable_dclk:
520 clk_disable(vop->dclk);
521 err_disable_hclk:
522 clk_disable(vop->hclk);
523 }
524
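/*
 * Power down the CRTC: request standby, wait for the dsp hold valid irq so
 * the current frame finishes, then detach the iommu and gate the clocks.
 * The register contents remain cached in regsbak.
 */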
525 static void vop_disable(struct drm_crtc *crtc)
526 {
527 struct vop *vop = to_vop(crtc);
528
529 if (!vop->is_enabled)
530 return;
531
532 drm_vblank_off(crtc->dev, vop->pipe);
533
 534         /*
 535          * Vop standby takes effect at the end of the current frame;
 536          * the dsp hold valid irq signals that standby has completed.
 537          *
 538          * We must wait for standby to complete before disabling aclk,
 539          * otherwise the memory bus may hang.
 540          */
541 reinit_completion(&vop->dsp_hold_completion);
542 vop_dsp_hold_valid_irq_enable(vop);
543
544 spin_lock(&vop->reg_lock);
545
546 VOP_CTRL_SET(vop, standby, 1);
547
548 spin_unlock(&vop->reg_lock);
549
550 wait_for_completion(&vop->dsp_hold_completion);
551
552 vop_dsp_hold_valid_irq_disable(vop);
553
554 disable_irq(vop->irq);
555
556 vop->is_enabled = false;
557
 558         /*
 559          * Vop standby has completed, so it is safe to detach the iommu.
 560          */
561 rockchip_drm_dma_detach_device(vop->drm_dev, vop->dev);
562
563 clk_disable(vop->dclk);
564 clk_disable(vop->aclk);
565 clk_disable(vop->hclk);
566 pm_runtime_put(vop->dev);
567 }
568
569 /*
570 * Caller must hold vsync_mutex.
571 */
572 static struct drm_framebuffer *vop_win_last_pending_fb(struct vop_win *vop_win)
573 {
574 struct vop_win_state *last;
575 struct vop_win_state *active = vop_win->active;
576
577 if (list_empty(&vop_win->pending))
578 return active ? active->fb : NULL;
579
580 last = list_last_entry(&vop_win->pending, struct vop_win_state, head);
581 return last ? last->fb : NULL;
582 }
583
584 /*
585 * Caller must hold vsync_mutex.
586 */
587 static int vop_win_queue_fb(struct vop_win *vop_win,
588 struct drm_framebuffer *fb, dma_addr_t yrgb_mst,
589 struct drm_pending_vblank_event *event)
590 {
591 struct vop_win_state *state;
592
593 state = kzalloc(sizeof(*state), GFP_KERNEL);
594 if (!state)
595 return -ENOMEM;
596
597 state->fb = fb;
598 state->yrgb_mst = yrgb_mst;
599 state->event = event;
600
601 list_add_tail(&state->head, &vop_win->pending);
602
603 return 0;
604 }
605
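/*
 * Program a window for the given framebuffer. The new fb is queued on the
 * window's pending list (together with an optional vblank event) so the
 * vsync worker can tell when the flip actually completed, then the window
 * registers are written and latched with vop_cfg_done().
 */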
606 static int vop_update_plane_event(struct drm_plane *plane,
607 struct drm_crtc *crtc,
608 struct drm_framebuffer *fb, int crtc_x,
609 int crtc_y, unsigned int crtc_w,
610 unsigned int crtc_h, uint32_t src_x,
611 uint32_t src_y, uint32_t src_w,
612 uint32_t src_h,
613 struct drm_pending_vblank_event *event)
614 {
615 struct vop_win *vop_win = to_vop_win(plane);
616 const struct vop_win_data *win = vop_win->data;
617 struct vop *vop = to_vop(crtc);
618 struct drm_gem_object *obj;
619 struct rockchip_gem_object *rk_obj;
620 struct drm_gem_object *uv_obj;
621 struct rockchip_gem_object *rk_uv_obj;
622 unsigned long offset;
623 unsigned int actual_w;
624 unsigned int actual_h;
625 unsigned int dsp_stx;
626 unsigned int dsp_sty;
627 unsigned int y_vir_stride;
628 unsigned int uv_vir_stride = 0;
629 dma_addr_t yrgb_mst;
630 dma_addr_t uv_mst = 0;
631 enum vop_data_format format;
632 uint32_t val;
633 bool is_alpha;
634 bool rb_swap;
635 bool is_yuv;
636 bool visible;
637 int ret;
638 struct drm_rect dest = {
639 .x1 = crtc_x,
640 .y1 = crtc_y,
641 .x2 = crtc_x + crtc_w,
642 .y2 = crtc_y + crtc_h,
643 };
644 struct drm_rect src = {
645 /* 16.16 fixed point */
646 .x1 = src_x,
647 .y1 = src_y,
648 .x2 = src_x + src_w,
649 .y2 = src_y + src_h,
650 };
651 const struct drm_rect clip = {
652 .x2 = crtc->mode.hdisplay,
653 .y2 = crtc->mode.vdisplay,
654 };
655 bool can_position = plane->type != DRM_PLANE_TYPE_PRIMARY;
656
657 ret = drm_plane_helper_check_update(plane, crtc, fb,
658 &src, &dest, &clip,
659 DRM_PLANE_HELPER_NO_SCALING,
660 DRM_PLANE_HELPER_NO_SCALING,
661 can_position, false, &visible);
662 if (ret)
663 return ret;
664
665 if (!visible)
666 return 0;
667
668 is_alpha = is_alpha_support(fb->pixel_format);
669 rb_swap = has_rb_swapped(fb->pixel_format);
670 is_yuv = is_yuv_support(fb->pixel_format);
671
672 format = vop_convert_format(fb->pixel_format);
673 if (format < 0)
674 return format;
675
676 obj = rockchip_fb_get_gem_obj(fb, 0);
677 if (!obj) {
678 DRM_ERROR("fail to get rockchip gem object from framebuffer\n");
679 return -EINVAL;
680 }
681
682 rk_obj = to_rockchip_obj(obj);
683
684 if (is_yuv) {
 685                 /*
 686                  * Src.x1 can become odd after clipping, but the yuv plane
 687                  * start point must be aligned to 2 pixels.
 688                  */
689 val = (src.x1 >> 16) % 2;
690 src.x1 += val << 16;
691 src.x2 += val << 16;
692 }
693
694 actual_w = (src.x2 - src.x1) >> 16;
695 actual_h = (src.y2 - src.y1) >> 16;
696
697 dsp_stx = dest.x1 + crtc->mode.htotal - crtc->mode.hsync_start;
698 dsp_sty = dest.y1 + crtc->mode.vtotal - crtc->mode.vsync_start;
699
700 offset = (src.x1 >> 16) * drm_format_plane_cpp(fb->pixel_format, 0);
701 offset += (src.y1 >> 16) * fb->pitches[0];
702
703 yrgb_mst = rk_obj->dma_addr + offset + fb->offsets[0];
704 y_vir_stride = fb->pitches[0] >> 2;
705
706 if (is_yuv) {
707 int hsub = drm_format_horz_chroma_subsampling(fb->pixel_format);
708 int vsub = drm_format_vert_chroma_subsampling(fb->pixel_format);
709 int bpp = drm_format_plane_cpp(fb->pixel_format, 1);
710
711 uv_obj = rockchip_fb_get_gem_obj(fb, 1);
712 if (!uv_obj) {
713 DRM_ERROR("fail to get uv object from framebuffer\n");
714 return -EINVAL;
715 }
716 rk_uv_obj = to_rockchip_obj(uv_obj);
717 uv_vir_stride = fb->pitches[1] >> 2;
718
719 offset = (src.x1 >> 16) * bpp / hsub;
720 offset += (src.y1 >> 16) * fb->pitches[1] / vsub;
721
722 uv_mst = rk_uv_obj->dma_addr + offset + fb->offsets[1];
723 }
724
 725         /*
 726          * If this plane update changes the plane's framebuffer (or more
 727          * precisely, if this update has a different framebuffer than the
 728          * last update), enqueue it so we can track when it completes.
 729          *
 730          * Only once we discover that this update has completed can we
 731          * unreference any previous framebuffers.
 732          */
733 mutex_lock(&vop->vsync_mutex);
734 if (fb != vop_win_last_pending_fb(vop_win)) {
735 ret = drm_vblank_get(plane->dev, vop->pipe);
736 if (ret) {
737 DRM_ERROR("failed to get vblank, %d\n", ret);
738 mutex_unlock(&vop->vsync_mutex);
739 return ret;
740 }
741
742 drm_framebuffer_reference(fb);
743
744 ret = vop_win_queue_fb(vop_win, fb, yrgb_mst, event);
745 if (ret) {
746 drm_vblank_put(plane->dev, vop->pipe);
747 mutex_unlock(&vop->vsync_mutex);
748 return ret;
749 }
750
751 vop->vsync_work_pending = true;
752 }
753 mutex_unlock(&vop->vsync_mutex);
754
755 spin_lock(&vop->reg_lock);
756
757 VOP_WIN_SET(vop, win, format, format);
758 VOP_WIN_SET(vop, win, yrgb_vir, y_vir_stride);
759 VOP_WIN_SET(vop, win, yrgb_mst, yrgb_mst);
760 if (is_yuv) {
761 VOP_WIN_SET(vop, win, uv_vir, uv_vir_stride);
762 VOP_WIN_SET(vop, win, uv_mst, uv_mst);
763 }
764 val = (actual_h - 1) << 16;
765 val |= (actual_w - 1) & 0xffff;
766 VOP_WIN_SET(vop, win, act_info, val);
767 VOP_WIN_SET(vop, win, dsp_info, val);
768 val = (dsp_sty - 1) << 16;
769 val |= (dsp_stx - 1) & 0xffff;
770 VOP_WIN_SET(vop, win, dsp_st, val);
771 VOP_WIN_SET(vop, win, rb_swap, rb_swap);
772
773 if (is_alpha) {
774 VOP_WIN_SET(vop, win, dst_alpha_ctl,
775 DST_FACTOR_M0(ALPHA_SRC_INVERSE));
776 val = SRC_ALPHA_EN(1) | SRC_COLOR_M0(ALPHA_SRC_PRE_MUL) |
777 SRC_ALPHA_M0(ALPHA_STRAIGHT) |
778 SRC_BLEND_M0(ALPHA_PER_PIX) |
779 SRC_ALPHA_CAL_M0(ALPHA_NO_SATURATION) |
780 SRC_FACTOR_M0(ALPHA_ONE);
781 VOP_WIN_SET(vop, win, src_alpha_ctl, val);
782 } else {
783 VOP_WIN_SET(vop, win, src_alpha_ctl, SRC_ALPHA_EN(0));
784 }
785
786 VOP_WIN_SET(vop, win, enable, 1);
787
788 vop_cfg_done(vop);
789 spin_unlock(&vop->reg_lock);
790
791 return 0;
792 }
793
794 static int vop_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
795 struct drm_framebuffer *fb, int crtc_x, int crtc_y,
796 unsigned int crtc_w, unsigned int crtc_h,
797 uint32_t src_x, uint32_t src_y, uint32_t src_w,
798 uint32_t src_h)
799 {
800 return vop_update_plane_event(plane, crtc, fb, crtc_x, crtc_y, crtc_w,
801 crtc_h, src_x, src_y, src_w, src_h,
802 NULL);
803 }
804
805 static int vop_update_primary_plane(struct drm_crtc *crtc,
806 struct drm_pending_vblank_event *event)
807 {
808 unsigned int crtc_w, crtc_h;
809
810 crtc_w = crtc->primary->fb->width - crtc->x;
811 crtc_h = crtc->primary->fb->height - crtc->y;
812
813 return vop_update_plane_event(crtc->primary, crtc, crtc->primary->fb,
814 0, 0, crtc_w, crtc_h, crtc->x << 16,
815 crtc->y << 16, crtc_w << 16,
816 crtc_h << 16, event);
817 }
818
819 static int vop_disable_plane(struct drm_plane *plane)
820 {
821 struct vop_win *vop_win = to_vop_win(plane);
822 const struct vop_win_data *win = vop_win->data;
823 struct vop *vop;
824 int ret;
825
826 if (!plane->crtc)
827 return 0;
828
829 vop = to_vop(plane->crtc);
830
831 ret = drm_vblank_get(plane->dev, vop->pipe);
832 if (ret) {
833 DRM_ERROR("failed to get vblank, %d\n", ret);
834 return ret;
835 }
836
837 mutex_lock(&vop->vsync_mutex);
838
839 ret = vop_win_queue_fb(vop_win, NULL, 0, NULL);
840 if (ret) {
841 drm_vblank_put(plane->dev, vop->pipe);
842 mutex_unlock(&vop->vsync_mutex);
843 return ret;
844 }
845
846 vop->vsync_work_pending = true;
847 mutex_unlock(&vop->vsync_mutex);
848
849 spin_lock(&vop->reg_lock);
850 VOP_WIN_SET(vop, win, enable, 0);
851 vop_cfg_done(vop);
852 spin_unlock(&vop->reg_lock);
853
854 return 0;
855 }
856
857 static void vop_plane_destroy(struct drm_plane *plane)
858 {
859 vop_disable_plane(plane);
860 drm_plane_cleanup(plane);
861 }
862
863 static const struct drm_plane_funcs vop_plane_funcs = {
864 .update_plane = vop_update_plane,
865 .disable_plane = vop_disable_plane,
866 .destroy = vop_plane_destroy,
867 };
868
869 int rockchip_drm_crtc_mode_config(struct drm_crtc *crtc,
870 int connector_type,
871 int out_mode)
872 {
873 struct vop *vop = to_vop(crtc);
874
875 vop->connector_type = connector_type;
876 vop->connector_out_mode = out_mode;
877
878 return 0;
879 }
880 EXPORT_SYMBOL_GPL(rockchip_drm_crtc_mode_config);
881
882 static int vop_crtc_enable_vblank(struct drm_crtc *crtc)
883 {
884 struct vop *vop = to_vop(crtc);
885 unsigned long flags;
886
887 if (!vop->is_enabled)
888 return -EPERM;
889
890 spin_lock_irqsave(&vop->irq_lock, flags);
891
892 vop_mask_write(vop, INTR_CTRL0, FS_INTR_MASK, FS_INTR_EN(1));
893
894 spin_unlock_irqrestore(&vop->irq_lock, flags);
895
896 return 0;
897 }
898
899 static void vop_crtc_disable_vblank(struct drm_crtc *crtc)
900 {
901 struct vop *vop = to_vop(crtc);
902 unsigned long flags;
903
904 if (!vop->is_enabled)
905 return;
906
907 spin_lock_irqsave(&vop->irq_lock, flags);
908 vop_mask_write(vop, INTR_CTRL0, FS_INTR_MASK, FS_INTR_EN(0));
909 spin_unlock_irqrestore(&vop->irq_lock, flags);
910 }
911
912 static const struct rockchip_crtc_funcs private_crtc_funcs = {
913 .enable_vblank = vop_crtc_enable_vblank,
914 .disable_vblank = vop_crtc_disable_vblank,
915 };
916
917 static void vop_crtc_dpms(struct drm_crtc *crtc, int mode)
918 {
919 DRM_DEBUG_KMS("crtc[%d] mode[%d]\n", crtc->base.id, mode);
920
921 switch (mode) {
922 case DRM_MODE_DPMS_ON:
923 vop_enable(crtc);
924 break;
925 case DRM_MODE_DPMS_STANDBY:
926 case DRM_MODE_DPMS_SUSPEND:
927 case DRM_MODE_DPMS_OFF:
928 vop_disable(crtc);
929 break;
930 default:
931 DRM_DEBUG_KMS("unspecified mode %d\n", mode);
932 break;
933 }
934 }
935
936 static void vop_crtc_prepare(struct drm_crtc *crtc)
937 {
938 vop_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
939 }
940
941 static bool vop_crtc_mode_fixup(struct drm_crtc *crtc,
942 const struct drm_display_mode *mode,
943 struct drm_display_mode *adjusted_mode)
944 {
945 if (adjusted_mode->htotal == 0 || adjusted_mode->vtotal == 0)
946 return false;
947
948 return true;
949 }
950
951 static int vop_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
952 struct drm_framebuffer *old_fb)
953 {
954 int ret;
955
956 crtc->x = x;
957 crtc->y = y;
958
959 ret = vop_update_primary_plane(crtc, NULL);
960 if (ret < 0) {
961 DRM_ERROR("fail to update plane\n");
962 return ret;
963 }
964
965 return 0;
966 }
967
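/*
 * Program the display timings. dclk is stopped while the mode registers are
 * written, the dclk reset is pulsed so the new configuration starts on a
 * clean frame, and the pixel clock is then set and re-enabled.
 */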
968 static int vop_crtc_mode_set(struct drm_crtc *crtc,
969 struct drm_display_mode *mode,
970 struct drm_display_mode *adjusted_mode,
971 int x, int y, struct drm_framebuffer *fb)
972 {
973 struct vop *vop = to_vop(crtc);
974 u16 hsync_len = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
975 u16 hdisplay = adjusted_mode->hdisplay;
976 u16 htotal = adjusted_mode->htotal;
977 u16 hact_st = adjusted_mode->htotal - adjusted_mode->hsync_start;
978 u16 hact_end = hact_st + hdisplay;
979 u16 vdisplay = adjusted_mode->vdisplay;
980 u16 vtotal = adjusted_mode->vtotal;
981 u16 vsync_len = adjusted_mode->vsync_end - adjusted_mode->vsync_start;
982 u16 vact_st = adjusted_mode->vtotal - adjusted_mode->vsync_start;
983 u16 vact_end = vact_st + vdisplay;
984 int ret, ret_clk;
985 uint32_t val;
986
 987         /*
 988          * Disable dclk to stop frame scanout so that we can safely
 989          * configure the mode and enable the iommu.
 990          */
991 clk_disable(vop->dclk);
992
993 switch (vop->connector_type) {
994 case DRM_MODE_CONNECTOR_LVDS:
995 VOP_CTRL_SET(vop, rgb_en, 1);
996 break;
997 case DRM_MODE_CONNECTOR_eDP:
998 VOP_CTRL_SET(vop, edp_en, 1);
999 break;
1000 case DRM_MODE_CONNECTOR_HDMIA:
1001 VOP_CTRL_SET(vop, hdmi_en, 1);
1002 break;
1003 default:
 1004                 DRM_ERROR("unsupported connector_type[%d]\n",
1005 vop->connector_type);
1006 ret = -EINVAL;
1007 goto out;
 1008         }
1009 VOP_CTRL_SET(vop, out_mode, vop->connector_out_mode);
1010
1011 val = 0x8;
1012 val |= (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) ? 0 : 1;
1013 val |= (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) ? 0 : (1 << 1);
1014 VOP_CTRL_SET(vop, pin_pol, val);
1015
1016 VOP_CTRL_SET(vop, htotal_pw, (htotal << 16) | hsync_len);
1017 val = hact_st << 16;
1018 val |= hact_end;
1019 VOP_CTRL_SET(vop, hact_st_end, val);
1020 VOP_CTRL_SET(vop, hpost_st_end, val);
1021
1022 VOP_CTRL_SET(vop, vtotal_pw, (vtotal << 16) | vsync_len);
1023 val = vact_st << 16;
1024 val |= vact_end;
1025 VOP_CTRL_SET(vop, vact_st_end, val);
1026 VOP_CTRL_SET(vop, vpost_st_end, val);
1027
1028 ret = vop_crtc_mode_set_base(crtc, x, y, fb);
1029 if (ret)
1030 goto out;
1031
 1032         /*
 1033          * Reset dclk so that all of the mode configuration takes effect
 1034          * and the clock starts on a clean frame.
 1035          */
1036 reset_control_assert(vop->dclk_rst);
1037 usleep_range(10, 20);
1038 reset_control_deassert(vop->dclk_rst);
1039
1040 clk_set_rate(vop->dclk, adjusted_mode->clock * 1000);
1041 out:
1042 ret_clk = clk_enable(vop->dclk);
1043 if (ret_clk < 0) {
1044 dev_err(vop->dev, "failed to enable dclk - %d\n", ret_clk);
1045 return ret_clk;
1046 }
1047
1048 return ret;
1049 }
1050
1051 static void vop_crtc_commit(struct drm_crtc *crtc)
1052 {
1053 }
1054
1055 static const struct drm_crtc_helper_funcs vop_crtc_helper_funcs = {
1056 .dpms = vop_crtc_dpms,
1057 .prepare = vop_crtc_prepare,
1058 .mode_fixup = vop_crtc_mode_fixup,
1059 .mode_set = vop_crtc_mode_set,
1060 .mode_set_base = vop_crtc_mode_set_base,
1061 .commit = vop_crtc_commit,
1062 };
1063
1064 static int vop_crtc_page_flip(struct drm_crtc *crtc,
1065 struct drm_framebuffer *fb,
1066 struct drm_pending_vblank_event *event,
1067 uint32_t page_flip_flags)
1068 {
1069 struct vop *vop = to_vop(crtc);
1070 struct drm_framebuffer *old_fb = crtc->primary->fb;
1071 int ret;
1072
1073 /* when the page flip is requested, crtc should be on */
1074 if (!vop->is_enabled) {
1075 DRM_DEBUG("page flip request rejected because crtc is off.\n");
1076 return 0;
1077 }
1078
1079 crtc->primary->fb = fb;
1080
1081 ret = vop_update_primary_plane(crtc, event);
1082 if (ret)
1083 crtc->primary->fb = old_fb;
1084
1085 return ret;
1086 }
1087
1088 static void vop_win_state_complete(struct vop_win *vop_win,
1089 struct vop_win_state *state)
1090 {
1091 struct vop *vop = vop_win->vop;
1092 struct drm_crtc *crtc = &vop->crtc;
1093 struct drm_device *drm = crtc->dev;
1094 unsigned long flags;
1095
1096 if (state->event) {
1097 spin_lock_irqsave(&drm->event_lock, flags);
1098 drm_send_vblank_event(drm, -1, state->event);
1099 spin_unlock_irqrestore(&drm->event_lock, flags);
1100 }
1101
1102 list_del(&state->head);
1103 drm_vblank_put(crtc->dev, vop->pipe);
1104 }
1105
1106 static void vop_crtc_destroy(struct drm_crtc *crtc)
1107 {
1108 drm_crtc_cleanup(crtc);
1109 }
1110
1111 static const struct drm_crtc_funcs vop_crtc_funcs = {
1112 .set_config = drm_crtc_helper_set_config,
1113 .page_flip = vop_crtc_page_flip,
1114 .destroy = vop_crtc_destroy,
1115 };
1116
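/*
 * A pending state is "active" once the hardware has latched it: for an
 * update we compare the window's current yrgb address with the queued one,
 * for a disable we check that the enable bit has been cleared.
 */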
1117 static bool vop_win_state_is_active(struct vop_win *vop_win,
1118 struct vop_win_state *state)
1119 {
1120 bool active = false;
1121
1122 if (state->fb) {
1123 dma_addr_t yrgb_mst;
1124
1125 /* check yrgb_mst to tell if pending_fb is now front */
1126 yrgb_mst = VOP_WIN_GET_YRGBADDR(vop_win->vop, vop_win->data);
1127
1128 active = (yrgb_mst == state->yrgb_mst);
1129 } else {
1130 bool enabled;
1131
1132 /* if enable bit is clear, plane is now disabled */
1133 enabled = VOP_WIN_GET(vop_win->vop, vop_win->data, enable);
1134
1135 active = (enabled == 0);
1136 }
1137
1138 return active;
1139 }
1140
1141 static void vop_win_state_destroy(struct vop_win_state *state)
1142 {
1143 struct drm_framebuffer *fb = state->fb;
1144
1145 if (fb)
1146 drm_framebuffer_unreference(fb);
1147
1148 kfree(state);
1149 }
1150
1151 static void vop_win_update_state(struct vop_win *vop_win)
1152 {
1153 struct vop_win_state *state, *n, *new_active = NULL;
1154
1155 /* Check if any pending states are now active */
1156 list_for_each_entry(state, &vop_win->pending, head)
1157 if (vop_win_state_is_active(vop_win, state)) {
1158 new_active = state;
1159 break;
1160 }
1161
1162 if (!new_active)
1163 return;
1164
1165 /*
1166 * Destroy any 'skipped' pending states - states that were queued
1167 * before the newly active state.
1168 */
1169 list_for_each_entry_safe(state, n, &vop_win->pending, head) {
1170 if (state == new_active)
1171 break;
1172 vop_win_state_complete(vop_win, state);
1173 vop_win_state_destroy(state);
1174 }
1175
1176 vop_win_state_complete(vop_win, new_active);
1177
1178 if (vop_win->active)
1179 vop_win_state_destroy(vop_win->active);
1180 vop_win->active = new_active;
1181 }
1182
1183 static bool vop_win_has_pending_state(struct vop_win *vop_win)
1184 {
1185 return !list_empty(&vop_win->pending);
1186 }
1187
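/*
 * Threaded half of the interrupt handler: walk every window, retire the
 * pending states the hardware has taken over (sending their vblank events
 * and dropping the old framebuffer references), and keep vsync_work_pending
 * set while any window still has queued states.
 */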
1188 static irqreturn_t vop_isr_thread(int irq, void *data)
1189 {
1190 struct vop *vop = data;
1191 const struct vop_data *vop_data = vop->data;
1192 unsigned int i;
1193
1194 mutex_lock(&vop->vsync_mutex);
1195
1196 if (!vop->vsync_work_pending)
1197 goto done;
1198
1199 vop->vsync_work_pending = false;
1200
1201 for (i = 0; i < vop_data->win_size; i++) {
1202 struct vop_win *vop_win = &vop->win[i];
1203
1204 vop_win_update_state(vop_win);
1205 if (vop_win_has_pending_state(vop_win))
1206 vop->vsync_work_pending = true;
1207 }
1208
1209 done:
1210 mutex_unlock(&vop->vsync_mutex);
1211
1212 return IRQ_HANDLED;
1213 }
1214
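/*
 * Hard irq handler: acknowledge the active interrupt sources, complete the
 * standby handshake on DSP_HOLD_VALID_INTR, forward FS_INTR as a vblank and
 * wake the threaded handler when flip work is pending.
 */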
1215 static irqreturn_t vop_isr(int irq, void *data)
1216 {
1217 struct vop *vop = data;
1218 uint32_t intr0_reg, active_irqs;
1219 unsigned long flags;
1220 int ret = IRQ_NONE;
1221
 1222         /*
 1223          * INTR_CTRL0 holds the interrupt status, enable and clear bits; we
 1224          * must hold irq_lock to avoid a race with enable/disable_vblank().
 1225          */
1226 spin_lock_irqsave(&vop->irq_lock, flags);
1227 intr0_reg = vop_readl(vop, INTR_CTRL0);
1228 active_irqs = intr0_reg & INTR_MASK;
1229 /* Clear all active interrupt sources */
1230 if (active_irqs)
1231 vop_writel(vop, INTR_CTRL0,
1232 intr0_reg | (active_irqs << INTR_CLR_SHIFT));
1233 spin_unlock_irqrestore(&vop->irq_lock, flags);
1234
1235 /* This is expected for vop iommu irqs, since the irq is shared */
1236 if (!active_irqs)
1237 return IRQ_NONE;
1238
1239 if (active_irqs & DSP_HOLD_VALID_INTR) {
1240 complete(&vop->dsp_hold_completion);
1241 active_irqs &= ~DSP_HOLD_VALID_INTR;
1242 ret = IRQ_HANDLED;
1243 }
1244
1245 if (active_irqs & FS_INTR) {
1246 drm_handle_vblank(vop->drm_dev, vop->pipe);
1247 active_irqs &= ~FS_INTR;
1248 ret = (vop->vsync_work_pending) ? IRQ_WAKE_THREAD : IRQ_HANDLED;
1249 }
1250
1251 /* Unhandled irqs are spurious. */
1252 if (active_irqs)
1253 DRM_ERROR("Unknown VOP IRQs: %#02x\n", active_irqs);
1254
1255 return ret;
1256 }
1257
1258 static int vop_create_crtc(struct vop *vop)
1259 {
1260 const struct vop_data *vop_data = vop->data;
1261 struct device *dev = vop->dev;
1262 struct drm_device *drm_dev = vop->drm_dev;
1263 struct drm_plane *primary = NULL, *cursor = NULL, *plane;
1264 struct drm_crtc *crtc = &vop->crtc;
1265 struct device_node *port;
1266 int ret;
1267 int i;
1268
1269 /*
1270 * Create drm_plane for primary and cursor planes first, since we need
1271 * to pass them to drm_crtc_init_with_planes, which sets the
1272 * "possible_crtcs" to the newly initialized crtc.
1273 */
1274 for (i = 0; i < vop_data->win_size; i++) {
1275 struct vop_win *vop_win = &vop->win[i];
1276 const struct vop_win_data *win_data = vop_win->data;
1277
1278 if (win_data->type != DRM_PLANE_TYPE_PRIMARY &&
1279 win_data->type != DRM_PLANE_TYPE_CURSOR)
1280 continue;
1281
1282 ret = drm_universal_plane_init(vop->drm_dev, &vop_win->base,
1283 0, &vop_plane_funcs,
1284 win_data->phy->data_formats,
1285 win_data->phy->nformats,
1286 win_data->type);
1287 if (ret) {
1288 DRM_ERROR("failed to initialize plane\n");
1289 goto err_cleanup_planes;
1290 }
1291
1292 plane = &vop_win->base;
1293 if (plane->type == DRM_PLANE_TYPE_PRIMARY)
1294 primary = plane;
1295 else if (plane->type == DRM_PLANE_TYPE_CURSOR)
1296 cursor = plane;
1297 }
1298
1299 ret = drm_crtc_init_with_planes(drm_dev, crtc, primary, cursor,
1300 &vop_crtc_funcs);
1301 if (ret)
1302 return ret;
1303
1304 drm_crtc_helper_add(crtc, &vop_crtc_helper_funcs);
1305
1306 /*
1307 * Create drm_planes for overlay windows with possible_crtcs restricted
1308 * to the newly created crtc.
1309 */
1310 for (i = 0; i < vop_data->win_size; i++) {
1311 struct vop_win *vop_win = &vop->win[i];
1312 const struct vop_win_data *win_data = vop_win->data;
1313 unsigned long possible_crtcs = 1 << drm_crtc_index(crtc);
1314
1315 if (win_data->type != DRM_PLANE_TYPE_OVERLAY)
1316 continue;
1317
1318 ret = drm_universal_plane_init(vop->drm_dev, &vop_win->base,
1319 possible_crtcs,
1320 &vop_plane_funcs,
1321 win_data->phy->data_formats,
1322 win_data->phy->nformats,
1323 win_data->type);
1324 if (ret) {
1325 DRM_ERROR("failed to initialize overlay plane\n");
1326 goto err_cleanup_crtc;
1327 }
1328 }
1329
1330 port = of_get_child_by_name(dev->of_node, "port");
1331 if (!port) {
1332 DRM_ERROR("no port node found in %s\n",
1333 dev->of_node->full_name);
1334 goto err_cleanup_crtc;
1335 }
1336
1337 init_completion(&vop->dsp_hold_completion);
1338 crtc->port = port;
1339 vop->pipe = drm_crtc_index(crtc);
1340 rockchip_register_crtc_funcs(drm_dev, &private_crtc_funcs, vop->pipe);
1341
1342 return 0;
1343
1344 err_cleanup_crtc:
1345 drm_crtc_cleanup(crtc);
1346 err_cleanup_planes:
1347 list_for_each_entry(plane, &drm_dev->mode_config.plane_list, head)
1348 drm_plane_cleanup(plane);
1349 return ret;
1350 }
1351
1352 static void vop_destroy_crtc(struct vop *vop)
1353 {
1354 struct drm_crtc *crtc = &vop->crtc;
1355
1356 rockchip_unregister_crtc_funcs(vop->drm_dev, vop->pipe);
1357 of_node_put(crtc->port);
1358 drm_crtc_cleanup(crtc);
1359 }
1360
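/*
 * One-time hardware init: claim and prepare the clocks, pulse the ahb reset,
 * snapshot the post-reset registers into regsbak, apply the per-SoC init
 * table, disable all windows and pulse the dclk reset so the defaults take
 * effect.
 */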
1361 static int vop_initial(struct vop *vop)
1362 {
1363 const struct vop_data *vop_data = vop->data;
1364 const struct vop_reg_data *init_table = vop_data->init_table;
1365 struct reset_control *ahb_rst;
1366 int i, ret;
1367
1368 vop->hclk = devm_clk_get(vop->dev, "hclk_vop");
1369 if (IS_ERR(vop->hclk)) {
1370 dev_err(vop->dev, "failed to get hclk source\n");
1371 return PTR_ERR(vop->hclk);
1372 }
1373 vop->aclk = devm_clk_get(vop->dev, "aclk_vop");
1374 if (IS_ERR(vop->aclk)) {
1375 dev_err(vop->dev, "failed to get aclk source\n");
1376 return PTR_ERR(vop->aclk);
1377 }
1378 vop->dclk = devm_clk_get(vop->dev, "dclk_vop");
1379 if (IS_ERR(vop->dclk)) {
1380 dev_err(vop->dev, "failed to get dclk source\n");
1381 return PTR_ERR(vop->dclk);
1382 }
1383
1384 ret = clk_prepare(vop->hclk);
1385 if (ret < 0) {
1386 dev_err(vop->dev, "failed to prepare hclk\n");
1387 return ret;
1388 }
1389
1390 ret = clk_prepare(vop->dclk);
1391 if (ret < 0) {
1392 dev_err(vop->dev, "failed to prepare dclk\n");
1393 goto err_unprepare_hclk;
1394 }
1395
1396 ret = clk_prepare(vop->aclk);
1397 if (ret < 0) {
1398 dev_err(vop->dev, "failed to prepare aclk\n");
1399 goto err_unprepare_dclk;
1400 }
1401
 1402         /*
 1403          * Enable hclk so that we can configure the vop registers.
 1404          */
1405 ret = clk_enable(vop->hclk);
1406 if (ret < 0) {
 1407                 dev_err(vop->dev, "failed to enable hclk\n");
1408 goto err_unprepare_aclk;
1409 }
 1410         /*
 1411          * Pulse the ahb (hclk) reset to reset all vop registers.
 1412          */
1413 ahb_rst = devm_reset_control_get(vop->dev, "ahb");
1414 if (IS_ERR(ahb_rst)) {
1415 dev_err(vop->dev, "failed to get ahb reset\n");
1416 ret = PTR_ERR(ahb_rst);
1417 goto err_disable_hclk;
1418 }
1419 reset_control_assert(ahb_rst);
1420 usleep_range(10, 20);
1421 reset_control_deassert(ahb_rst);
1422
1423 memcpy(vop->regsbak, vop->regs, vop->len);
1424
1425 for (i = 0; i < vop_data->table_size; i++)
1426 vop_writel(vop, init_table[i].offset, init_table[i].value);
1427
1428 for (i = 0; i < vop_data->win_size; i++) {
1429 const struct vop_win_data *win = &vop_data->win[i];
1430
1431 VOP_WIN_SET(vop, win, enable, 0);
1432 }
1433
1434 vop_cfg_done(vop);
1435
 1436         /*
 1437          * Pulse the dclk reset so that all of the configuration takes effect.
 1438          */
1439 vop->dclk_rst = devm_reset_control_get(vop->dev, "dclk");
1440 if (IS_ERR(vop->dclk_rst)) {
1441 dev_err(vop->dev, "failed to get dclk reset\n");
1442 ret = PTR_ERR(vop->dclk_rst);
1443 goto err_unprepare_aclk;
1444 }
1445 reset_control_assert(vop->dclk_rst);
1446 usleep_range(10, 20);
1447 reset_control_deassert(vop->dclk_rst);
1448
1449 clk_disable(vop->hclk);
1450
1451 vop->is_enabled = false;
1452
1453 return 0;
1454
1455 err_disable_hclk:
1456 clk_disable(vop->hclk);
1457 err_unprepare_aclk:
1458 clk_unprepare(vop->aclk);
1459 err_unprepare_dclk:
1460 clk_unprepare(vop->dclk);
1461 err_unprepare_hclk:
1462 clk_unprepare(vop->hclk);
1463 return ret;
1464 }
1465
1466 /*
1467 * Initialize the vop->win array elements.
1468 */
1469 static void vop_win_init(struct vop *vop)
1470 {
1471 const struct vop_data *vop_data = vop->data;
1472 unsigned int i;
1473
1474 for (i = 0; i < vop_data->win_size; i++) {
1475 struct vop_win *vop_win = &vop->win[i];
1476 const struct vop_win_data *win_data = &vop_data->win[i];
1477
1478 vop_win->data = win_data;
1479 vop_win->vop = vop;
1480 INIT_LIST_HEAD(&vop_win->pending);
1481 }
1482 }
1483
1484 static int vop_bind(struct device *dev, struct device *master, void *data)
1485 {
1486 struct platform_device *pdev = to_platform_device(dev);
1487 const struct of_device_id *of_id;
1488 const struct vop_data *vop_data;
1489 struct drm_device *drm_dev = data;
1490 struct vop *vop;
1491 struct resource *res;
1492 size_t alloc_size;
1493 int ret, irq;
1494
1495 of_id = of_match_device(vop_driver_dt_match, dev);
1496 vop_data = of_id->data;
1497 if (!vop_data)
1498 return -ENODEV;
1499
1500 /* Allocate vop struct and its vop_win array */
1501 alloc_size = sizeof(*vop) + sizeof(*vop->win) * vop_data->win_size;
1502 vop = devm_kzalloc(dev, alloc_size, GFP_KERNEL);
1503 if (!vop)
1504 return -ENOMEM;
1505
1506 vop->dev = dev;
1507 vop->data = vop_data;
1508 vop->drm_dev = drm_dev;
1509 dev_set_drvdata(dev, vop);
1510
1511 vop_win_init(vop);
1512
1513 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1514 vop->len = resource_size(res);
1515 vop->regs = devm_ioremap_resource(dev, res);
1516 if (IS_ERR(vop->regs))
1517 return PTR_ERR(vop->regs);
1518
1519 vop->regsbak = devm_kzalloc(dev, vop->len, GFP_KERNEL);
1520 if (!vop->regsbak)
1521 return -ENOMEM;
1522
1523 ret = vop_initial(vop);
1524 if (ret < 0) {
 1525                 dev_err(&pdev->dev, "cannot initialize vop dev - err %d\n", ret);
1526 return ret;
1527 }
1528
1529 irq = platform_get_irq(pdev, 0);
1530 if (irq < 0) {
1531 dev_err(dev, "cannot find irq for vop\n");
1532 return irq;
1533 }
1534 vop->irq = (unsigned int)irq;
1535
1536 spin_lock_init(&vop->reg_lock);
1537 spin_lock_init(&vop->irq_lock);
1538
1539 mutex_init(&vop->vsync_mutex);
1540
1541 ret = devm_request_threaded_irq(dev, vop->irq, vop_isr, vop_isr_thread,
1542 IRQF_SHARED, dev_name(dev), vop);
1543 if (ret)
1544 return ret;
1545
 1546         /* IRQ is initially disabled; it gets enabled in vop_enable() */
1547 disable_irq(vop->irq);
1548
1549 ret = vop_create_crtc(vop);
1550 if (ret)
1551 return ret;
1552
1553 pm_runtime_enable(&pdev->dev);
1554 return 0;
1555 }
1556
1557 static void vop_unbind(struct device *dev, struct device *master, void *data)
1558 {
1559 struct vop *vop = dev_get_drvdata(dev);
1560
1561 pm_runtime_disable(dev);
1562 vop_destroy_crtc(vop);
1563 }
1564
1565 static const struct component_ops vop_component_ops = {
1566 .bind = vop_bind,
1567 .unbind = vop_unbind,
1568 };
1569
1570 static int vop_probe(struct platform_device *pdev)
1571 {
1572 struct device *dev = &pdev->dev;
1573
1574 if (!dev->of_node) {
1575 dev_err(dev, "can't find vop devices\n");
1576 return -ENODEV;
1577 }
1578
1579 return component_add(dev, &vop_component_ops);
1580 }
1581
1582 static int vop_remove(struct platform_device *pdev)
1583 {
1584 component_del(&pdev->dev, &vop_component_ops);
1585
1586 return 0;
1587 }
1588
1589 struct platform_driver vop_platform_driver = {
1590 .probe = vop_probe,
1591 .remove = vop_remove,
1592 .driver = {
1593 .name = "rockchip-vop",
1594 .owner = THIS_MODULE,
1595 .of_match_table = of_match_ptr(vop_driver_dt_match),
1596 },
1597 };
1598
1599 module_platform_driver(vop_platform_driver);
1600
1601 MODULE_AUTHOR("Mark Yao <mark.yao@rock-chips.com>");
1602 MODULE_DESCRIPTION("ROCKCHIP VOP Driver");
1603 MODULE_LICENSE("GPL v2");