2 * Copyright (c) 2014 The Linux Foundation. All rights reserved.
3 * Copyright (C) 2013 Red Hat
4 * Author: Rob Clark <robdclark@gmail.com>
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published by
8 * the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * You should have received a copy of the GNU General Public License along with
16 * this program. If not, see <http://www.gnu.org/licenses/>.
21 #include <linux/sort.h>
22 #include <drm/drm_mode.h>
24 #include "drm_crtc_helper.h"
25 #include "drm_flip_work.h"
27 #define CURSOR_WIDTH 64
28 #define CURSOR_HEIGHT 64
30 #define SSPP_MAX (SSPP_RGB3 + 1) /* TODO: Add SSPP_MAX in mdp5.xml.h */
38 /* layer mixer used for this CRTC (+ its lock): */
39 #define GET_LM_ID(crtc_id) ((crtc_id == 3) ? 5 : crtc_id)
41 spinlock_t lm_lock
; /* protect REG_MDP5_LM_* registers */
43 /* CTL used for this CRTC: */
46 /* if there is a pending flip, these will be non-null: */
47 struct drm_pending_vblank_event
*event
;
49 #define PENDING_CURSOR 0x1
50 #define PENDING_FLIP 0x2
53 /* for unref'ing cursor bo's after scanout completes: */
54 struct drm_flip_work unref_cursor_work
;
56 struct mdp_irq vblank
;
60 /* protect REG_MDP5_LM_CURSOR* registers and cursor scanout_bo*/
63 /* current cursor being scanned out: */
64 struct drm_gem_object
*scanout_bo
;
69 #define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)
71 static struct mdp5_kms
*get_kms(struct drm_crtc
*crtc
)
73 struct msm_drm_private
*priv
= crtc
->dev
->dev_private
;
74 return to_mdp5_kms(to_mdp_kms(priv
->kms
));
77 static void request_pending(struct drm_crtc
*crtc
, uint32_t pending
)
79 struct mdp5_crtc
*mdp5_crtc
= to_mdp5_crtc(crtc
);
81 atomic_or(pending
, &mdp5_crtc
->pending
);
82 mdp_irq_register(&get_kms(crtc
)->base
, &mdp5_crtc
->vblank
);
85 #define mdp5_lm_get_flush(lm) mdp_ctl_flush_mask_lm(lm)
87 static void crtc_flush(struct drm_crtc
*crtc
, u32 flush_mask
)
89 struct mdp5_crtc
*mdp5_crtc
= to_mdp5_crtc(crtc
);
91 DBG("%s: flush=%08x", mdp5_crtc
->name
, flush_mask
);
92 mdp5_ctl_commit(mdp5_crtc
->ctl
, flush_mask
);
96 * flush updates, to make sure hw is updated to new scanout fb,
97 * so that we can safely queue unref to current fb (ie. next
98 * vblank we know hw is done w/ previous scanout_fb).
100 static void crtc_flush_all(struct drm_crtc
*crtc
)
102 struct mdp5_crtc
*mdp5_crtc
= to_mdp5_crtc(crtc
);
103 struct drm_plane
*plane
;
104 uint32_t flush_mask
= 0;
106 /* we could have already released CTL in the disable path: */
110 drm_atomic_crtc_for_each_plane(plane
, crtc
) {
111 flush_mask
|= mdp5_plane_get_flush(plane
);
113 flush_mask
|= mdp5_ctl_get_flush(mdp5_crtc
->ctl
);
114 flush_mask
|= mdp5_lm_get_flush(mdp5_crtc
->lm
);
116 crtc_flush(crtc
, flush_mask
);
119 /* if file!=NULL, this is preclose potential cancel-flip path */
120 static void complete_flip(struct drm_crtc
*crtc
, struct drm_file
*file
)
122 struct mdp5_crtc
*mdp5_crtc
= to_mdp5_crtc(crtc
);
123 struct drm_device
*dev
= crtc
->dev
;
124 struct drm_pending_vblank_event
*event
;
125 struct drm_plane
*plane
;
128 spin_lock_irqsave(&dev
->event_lock
, flags
);
129 event
= mdp5_crtc
->event
;
131 /* if regular vblank case (!file) or if cancel-flip from
132 * preclose on file that requested flip, then send the
135 if (!file
|| (event
->base
.file_priv
== file
)) {
136 mdp5_crtc
->event
= NULL
;
137 DBG("%s: send event: %p", mdp5_crtc
->name
, event
);
138 drm_send_vblank_event(dev
, mdp5_crtc
->id
, event
);
141 spin_unlock_irqrestore(&dev
->event_lock
, flags
);
143 drm_atomic_crtc_for_each_plane(plane
, crtc
) {
144 mdp5_plane_complete_flip(plane
);
148 static void unref_cursor_worker(struct drm_flip_work
*work
, void *val
)
150 struct mdp5_crtc
*mdp5_crtc
=
151 container_of(work
, struct mdp5_crtc
, unref_cursor_work
);
152 struct mdp5_kms
*mdp5_kms
= get_kms(&mdp5_crtc
->base
);
154 msm_gem_put_iova(val
, mdp5_kms
->id
);
155 drm_gem_object_unreference_unlocked(val
);
158 static void mdp5_crtc_destroy(struct drm_crtc
*crtc
)
160 struct mdp5_crtc
*mdp5_crtc
= to_mdp5_crtc(crtc
);
162 drm_crtc_cleanup(crtc
);
163 drm_flip_work_cleanup(&mdp5_crtc
->unref_cursor_work
);
168 static bool mdp5_crtc_mode_fixup(struct drm_crtc
*crtc
,
169 const struct drm_display_mode
*mode
,
170 struct drm_display_mode
*adjusted_mode
)
176 * blend_setup() - blend all the planes of a CRTC
178 * When border is enabled, the border color will ALWAYS be the base layer.
179 * Therefore, the first plane (private RGB pipe) will start at STAGE0.
180 * If disabled, the first plane starts at STAGE_BASE.
183 * Border is not enabled here because the private plane is exactly
184 * the CRTC resolution.
186 static void blend_setup(struct drm_crtc
*crtc
)
188 struct mdp5_crtc
*mdp5_crtc
= to_mdp5_crtc(crtc
);
189 struct mdp5_kms
*mdp5_kms
= get_kms(crtc
);
190 struct drm_plane
*plane
;
191 const struct mdp5_cfg_hw
*hw_cfg
;
192 uint32_t lm
= mdp5_crtc
->lm
, blend_cfg
= 0;
194 #define blender(stage) ((stage) - STAGE_BASE)
196 hw_cfg
= mdp5_cfg_get_hw_config(mdp5_kms
->cfg
);
198 spin_lock_irqsave(&mdp5_crtc
->lm_lock
, flags
);
200 /* ctl could be released already when we are shutting down: */
204 drm_atomic_crtc_for_each_plane(plane
, crtc
) {
205 enum mdp_mixer_stage_id stage
=
206 to_mdp5_plane_state(plane
->state
)->stage
;
209 * Note: This cannot happen with current implementation but
210 * we need to check this condition once z property is added
212 BUG_ON(stage
> hw_cfg
->lm
.nb_stages
);
216 REG_MDP5_LM_BLEND_OP_MODE(lm
, blender(stage
)),
217 MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST
) |
218 MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST
));
219 mdp5_write(mdp5_kms
, REG_MDP5_LM_BLEND_FG_ALPHA(lm
,
220 blender(stage
)), 0xff);
221 mdp5_write(mdp5_kms
, REG_MDP5_LM_BLEND_BG_ALPHA(lm
,
222 blender(stage
)), 0x00);
224 blend_cfg
|= mdp_ctl_blend_mask(mdp5_plane_pipe(plane
), stage
);
225 DBG("%s: blending pipe %s on stage=%d", mdp5_crtc
->name
,
226 pipe2name(mdp5_plane_pipe(plane
)), stage
);
229 DBG("%s: lm%d: blend config = 0x%08x", mdp5_crtc
->name
, lm
, blend_cfg
);
230 mdp5_ctl_blend(mdp5_crtc
->ctl
, lm
, blend_cfg
);
233 spin_unlock_irqrestore(&mdp5_crtc
->lm_lock
, flags
);
236 static void mdp5_crtc_mode_set_nofb(struct drm_crtc
*crtc
)
238 struct mdp5_crtc
*mdp5_crtc
= to_mdp5_crtc(crtc
);
239 struct mdp5_kms
*mdp5_kms
= get_kms(crtc
);
241 struct drm_display_mode
*mode
;
243 if (WARN_ON(!crtc
->state
))
246 mode
= &crtc
->state
->adjusted_mode
;
248 DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
249 mdp5_crtc
->name
, mode
->base
.id
, mode
->name
,
250 mode
->vrefresh
, mode
->clock
,
251 mode
->hdisplay
, mode
->hsync_start
,
252 mode
->hsync_end
, mode
->htotal
,
253 mode
->vdisplay
, mode
->vsync_start
,
254 mode
->vsync_end
, mode
->vtotal
,
255 mode
->type
, mode
->flags
);
257 spin_lock_irqsave(&mdp5_crtc
->lm_lock
, flags
);
258 mdp5_write(mdp5_kms
, REG_MDP5_LM_OUT_SIZE(mdp5_crtc
->lm
),
259 MDP5_LM_OUT_SIZE_WIDTH(mode
->hdisplay
) |
260 MDP5_LM_OUT_SIZE_HEIGHT(mode
->vdisplay
));
261 spin_unlock_irqrestore(&mdp5_crtc
->lm_lock
, flags
);
264 static void mdp5_crtc_disable(struct drm_crtc
*crtc
)
266 struct mdp5_crtc
*mdp5_crtc
= to_mdp5_crtc(crtc
);
267 struct mdp5_kms
*mdp5_kms
= get_kms(crtc
);
269 DBG("%s", mdp5_crtc
->name
);
271 if (WARN_ON(!mdp5_crtc
->enabled
))
274 /* set STAGE_UNUSED for all layers */
275 mdp5_ctl_blend(mdp5_crtc
->ctl
, mdp5_crtc
->lm
, 0x00000000);
277 mdp_irq_unregister(&mdp5_kms
->base
, &mdp5_crtc
->err
);
278 mdp5_disable(mdp5_kms
);
280 mdp5_crtc
->enabled
= false;
283 static void mdp5_crtc_enable(struct drm_crtc
*crtc
)
285 struct mdp5_crtc
*mdp5_crtc
= to_mdp5_crtc(crtc
);
286 struct mdp5_kms
*mdp5_kms
= get_kms(crtc
);
288 DBG("%s", mdp5_crtc
->name
);
290 if (WARN_ON(mdp5_crtc
->enabled
))
293 mdp5_enable(mdp5_kms
);
294 mdp_irq_register(&mdp5_kms
->base
, &mdp5_crtc
->err
);
296 crtc_flush_all(crtc
);
298 mdp5_crtc
->enabled
= true;
302 struct drm_plane
*plane
;
303 struct mdp5_plane_state
*state
;
306 static int pstate_cmp(const void *a
, const void *b
)
308 struct plane_state
*pa
= (struct plane_state
*)a
;
309 struct plane_state
*pb
= (struct plane_state
*)b
;
310 return pa
->state
->zpos
- pb
->state
->zpos
;
313 static int mdp5_crtc_atomic_check(struct drm_crtc
*crtc
,
314 struct drm_crtc_state
*state
)
316 struct mdp5_crtc
*mdp5_crtc
= to_mdp5_crtc(crtc
);
317 struct mdp5_kms
*mdp5_kms
= get_kms(crtc
);
318 struct drm_plane
*plane
;
319 struct drm_device
*dev
= crtc
->dev
;
320 struct plane_state pstates
[STAGE3
+ 1];
323 DBG("%s: check", mdp5_crtc
->name
);
325 /* request a free CTL, if none is already allocated for this CRTC */
326 if (state
->enable
&& !mdp5_crtc
->ctl
) {
327 mdp5_crtc
->ctl
= mdp5_ctlm_request(mdp5_kms
->ctlm
, crtc
);
328 if (WARN_ON(!mdp5_crtc
->ctl
))
332 /* verify that there are not too many planes attached to crtc
333 * and that we don't have conflicting mixer stages:
335 drm_atomic_crtc_state_for_each_plane(plane
, state
) {
336 struct drm_plane_state
*pstate
;
338 if (cnt
>= ARRAY_SIZE(pstates
)) {
339 dev_err(dev
->dev
, "too many planes!\n");
343 pstate
= state
->state
->plane_states
[drm_plane_index(plane
)];
345 /* plane might not have changed, in which case take
349 pstate
= plane
->state
;
351 pstates
[cnt
].plane
= plane
;
352 pstates
[cnt
].state
= to_mdp5_plane_state(pstate
);
357 sort(pstates
, cnt
, sizeof(pstates
[0]), pstate_cmp
, NULL
);
359 for (i
= 0; i
< cnt
; i
++) {
360 pstates
[i
].state
->stage
= STAGE_BASE
+ i
;
361 DBG("%s: assign pipe %s on stage=%d", mdp5_crtc
->name
,
362 pipe2name(mdp5_plane_pipe(pstates
[i
].plane
)),
363 pstates
[i
].state
->stage
);
369 static void mdp5_crtc_atomic_begin(struct drm_crtc
*crtc
)
371 struct mdp5_crtc
*mdp5_crtc
= to_mdp5_crtc(crtc
);
372 DBG("%s: begin", mdp5_crtc
->name
);
375 static void mdp5_crtc_atomic_flush(struct drm_crtc
*crtc
)
377 struct mdp5_crtc
*mdp5_crtc
= to_mdp5_crtc(crtc
);
378 struct drm_device
*dev
= crtc
->dev
;
381 DBG("%s: event: %p", mdp5_crtc
->name
, crtc
->state
->event
);
383 WARN_ON(mdp5_crtc
->event
);
385 spin_lock_irqsave(&dev
->event_lock
, flags
);
386 mdp5_crtc
->event
= crtc
->state
->event
;
387 spin_unlock_irqrestore(&dev
->event_lock
, flags
);
390 crtc_flush_all(crtc
);
391 request_pending(crtc
, PENDING_FLIP
);
393 if (mdp5_crtc
->ctl
&& !crtc
->state
->enable
) {
394 mdp5_ctl_release(mdp5_crtc
->ctl
);
395 mdp5_crtc
->ctl
= NULL
;
399 static int mdp5_crtc_set_property(struct drm_crtc
*crtc
,
400 struct drm_property
*property
, uint64_t val
)
406 static int mdp5_crtc_cursor_set(struct drm_crtc
*crtc
,
407 struct drm_file
*file
, uint32_t handle
,
408 uint32_t width
, uint32_t height
)
410 struct mdp5_crtc
*mdp5_crtc
= to_mdp5_crtc(crtc
);
411 struct drm_device
*dev
= crtc
->dev
;
412 struct mdp5_kms
*mdp5_kms
= get_kms(crtc
);
413 struct drm_gem_object
*cursor_bo
, *old_bo
;
414 uint32_t blendcfg
, cursor_addr
, stride
;
417 enum mdp5_cursor_alpha cur_alpha
= CURSOR_ALPHA_PER_PIXEL
;
418 uint32_t flush_mask
= mdp_ctl_flush_mask_cursor(0);
421 if ((width
> CURSOR_WIDTH
) || (height
> CURSOR_HEIGHT
)) {
422 dev_err(dev
->dev
, "bad cursor size: %dx%d\n", width
, height
);
426 if (NULL
== mdp5_crtc
->ctl
)
431 return mdp5_ctl_set_cursor(mdp5_crtc
->ctl
, false);
434 cursor_bo
= drm_gem_object_lookup(dev
, file
, handle
);
438 ret
= msm_gem_get_iova(cursor_bo
, mdp5_kms
->id
, &cursor_addr
);
443 drm_fb_get_bpp_depth(DRM_FORMAT_ARGB8888
, &depth
, &bpp
);
444 stride
= width
* (bpp
>> 3);
446 spin_lock_irqsave(&mdp5_crtc
->cursor
.lock
, flags
);
447 old_bo
= mdp5_crtc
->cursor
.scanout_bo
;
449 mdp5_write(mdp5_kms
, REG_MDP5_LM_CURSOR_STRIDE(lm
), stride
);
450 mdp5_write(mdp5_kms
, REG_MDP5_LM_CURSOR_FORMAT(lm
),
451 MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888
));
452 mdp5_write(mdp5_kms
, REG_MDP5_LM_CURSOR_IMG_SIZE(lm
),
453 MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height
) |
454 MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width
));
455 mdp5_write(mdp5_kms
, REG_MDP5_LM_CURSOR_SIZE(lm
),
456 MDP5_LM_CURSOR_SIZE_ROI_H(height
) |
457 MDP5_LM_CURSOR_SIZE_ROI_W(width
));
458 mdp5_write(mdp5_kms
, REG_MDP5_LM_CURSOR_BASE_ADDR(lm
), cursor_addr
);
461 blendcfg
= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN
;
462 blendcfg
|= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_TRANSP_EN
;
463 blendcfg
|= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha
);
464 mdp5_write(mdp5_kms
, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm
), blendcfg
);
466 mdp5_crtc
->cursor
.scanout_bo
= cursor_bo
;
467 mdp5_crtc
->cursor
.width
= width
;
468 mdp5_crtc
->cursor
.height
= height
;
469 spin_unlock_irqrestore(&mdp5_crtc
->cursor
.lock
, flags
);
471 ret
= mdp5_ctl_set_cursor(mdp5_crtc
->ctl
, true);
475 flush_mask
|= mdp5_ctl_get_flush(mdp5_crtc
->ctl
);
476 crtc_flush(crtc
, flush_mask
);
480 drm_flip_work_queue(&mdp5_crtc
->unref_cursor_work
, old_bo
);
481 /* enable vblank to complete cursor work: */
482 request_pending(crtc
, PENDING_CURSOR
);
487 static int mdp5_crtc_cursor_move(struct drm_crtc
*crtc
, int x
, int y
)
489 struct mdp5_kms
*mdp5_kms
= get_kms(crtc
);
490 struct mdp5_crtc
*mdp5_crtc
= to_mdp5_crtc(crtc
);
491 uint32_t flush_mask
= mdp_ctl_flush_mask_cursor(0);
492 uint32_t xres
= crtc
->mode
.hdisplay
;
493 uint32_t yres
= crtc
->mode
.vdisplay
;
502 * Cursor Region Of Interest (ROI) is a plane read from cursor
503 * buffer to render. The ROI region is determined by the visiblity of
504 * the cursor point. In the default Cursor image the cursor point will
505 * be at the top left of the cursor image, unless it is specified
506 * otherwise using hotspot feature.
508 * If the cursor point reaches the right (xres - x < cursor.width) or
509 * bottom (yres - y < cursor.height) boundary of the screen, then ROI
510 * width and ROI height need to be evaluated to crop the cursor image
512 * (xres-x) will be new cursor width when x > (xres - cursor.width)
513 * (yres-y) will be new cursor height when y > (yres - cursor.height)
515 roi_w
= min(mdp5_crtc
->cursor
.width
, xres
- x
);
516 roi_h
= min(mdp5_crtc
->cursor
.height
, yres
- y
);
518 spin_lock_irqsave(&mdp5_crtc
->cursor
.lock
, flags
);
519 mdp5_write(mdp5_kms
, REG_MDP5_LM_CURSOR_SIZE(mdp5_crtc
->lm
),
520 MDP5_LM_CURSOR_SIZE_ROI_H(roi_h
) |
521 MDP5_LM_CURSOR_SIZE_ROI_W(roi_w
));
522 mdp5_write(mdp5_kms
, REG_MDP5_LM_CURSOR_START_XY(mdp5_crtc
->lm
),
523 MDP5_LM_CURSOR_START_XY_Y_START(y
) |
524 MDP5_LM_CURSOR_START_XY_X_START(x
));
525 spin_unlock_irqrestore(&mdp5_crtc
->cursor
.lock
, flags
);
527 crtc_flush(crtc
, flush_mask
);
532 static const struct drm_crtc_funcs mdp5_crtc_funcs
= {
533 .set_config
= drm_atomic_helper_set_config
,
534 .destroy
= mdp5_crtc_destroy
,
535 .page_flip
= drm_atomic_helper_page_flip
,
536 .set_property
= mdp5_crtc_set_property
,
537 .reset
= drm_atomic_helper_crtc_reset
,
538 .atomic_duplicate_state
= drm_atomic_helper_crtc_duplicate_state
,
539 .atomic_destroy_state
= drm_atomic_helper_crtc_destroy_state
,
540 .cursor_set
= mdp5_crtc_cursor_set
,
541 .cursor_move
= mdp5_crtc_cursor_move
,
544 static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs
= {
545 .mode_fixup
= mdp5_crtc_mode_fixup
,
546 .mode_set_nofb
= mdp5_crtc_mode_set_nofb
,
547 .prepare
= mdp5_crtc_disable
,
548 .commit
= mdp5_crtc_enable
,
549 .atomic_check
= mdp5_crtc_atomic_check
,
550 .atomic_begin
= mdp5_crtc_atomic_begin
,
551 .atomic_flush
= mdp5_crtc_atomic_flush
,
554 static void mdp5_crtc_vblank_irq(struct mdp_irq
*irq
, uint32_t irqstatus
)
556 struct mdp5_crtc
*mdp5_crtc
= container_of(irq
, struct mdp5_crtc
, vblank
);
557 struct drm_crtc
*crtc
= &mdp5_crtc
->base
;
558 struct msm_drm_private
*priv
= crtc
->dev
->dev_private
;
561 mdp_irq_unregister(&get_kms(crtc
)->base
, &mdp5_crtc
->vblank
);
563 pending
= atomic_xchg(&mdp5_crtc
->pending
, 0);
565 if (pending
& PENDING_FLIP
) {
566 complete_flip(crtc
, NULL
);
569 if (pending
& PENDING_CURSOR
)
570 drm_flip_work_commit(&mdp5_crtc
->unref_cursor_work
, priv
->wq
);
573 static void mdp5_crtc_err_irq(struct mdp_irq
*irq
, uint32_t irqstatus
)
575 struct mdp5_crtc
*mdp5_crtc
= container_of(irq
, struct mdp5_crtc
, err
);
577 DBG("%s: error: %08x", mdp5_crtc
->name
, irqstatus
);
580 uint32_t mdp5_crtc_vblank(struct drm_crtc
*crtc
)
582 struct mdp5_crtc
*mdp5_crtc
= to_mdp5_crtc(crtc
);
583 return mdp5_crtc
->vblank
.irqmask
;
/* Cancel a pending flip on file preclose (sends the event if owned). */
void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
{
	DBG("cancel: %p", file);
	complete_flip(crtc, file);
}
592 /* set interface for routing crtc->encoder: */
593 void mdp5_crtc_set_intf(struct drm_crtc
*crtc
, int intf
,
594 enum mdp5_intf intf_id
)
596 struct mdp5_crtc
*mdp5_crtc
= to_mdp5_crtc(crtc
);
597 struct mdp5_kms
*mdp5_kms
= get_kms(crtc
);
598 uint32_t flush_mask
= 0;
602 /* now that we know what irq's we want: */
603 mdp5_crtc
->err
.irqmask
= intf2err(intf
);
604 mdp5_crtc
->vblank
.irqmask
= intf2vblank(intf
);
605 mdp_irq_update(&mdp5_kms
->base
);
607 spin_lock_irqsave(&mdp5_kms
->resource_lock
, flags
);
608 intf_sel
= mdp5_read(mdp5_kms
, REG_MDP5_DISP_INTF_SEL
);
612 intf_sel
&= ~MDP5_DISP_INTF_SEL_INTF0__MASK
;
613 intf_sel
|= MDP5_DISP_INTF_SEL_INTF0(intf_id
);
616 intf_sel
&= ~MDP5_DISP_INTF_SEL_INTF1__MASK
;
617 intf_sel
|= MDP5_DISP_INTF_SEL_INTF1(intf_id
);
620 intf_sel
&= ~MDP5_DISP_INTF_SEL_INTF2__MASK
;
621 intf_sel
|= MDP5_DISP_INTF_SEL_INTF2(intf_id
);
624 intf_sel
&= ~MDP5_DISP_INTF_SEL_INTF3__MASK
;
625 intf_sel
|= MDP5_DISP_INTF_SEL_INTF3(intf_id
);
632 mdp5_write(mdp5_kms
, REG_MDP5_DISP_INTF_SEL
, intf_sel
);
633 spin_unlock_irqrestore(&mdp5_kms
->resource_lock
, flags
);
635 DBG("%s: intf_sel=%08x", mdp5_crtc
->name
, intf_sel
);
636 mdp5_ctl_set_intf(mdp5_crtc
->ctl
, intf
);
637 flush_mask
|= mdp5_ctl_get_flush(mdp5_crtc
->ctl
);
638 flush_mask
|= mdp5_lm_get_flush(mdp5_crtc
->lm
);
640 crtc_flush(crtc
, flush_mask
);
643 int mdp5_crtc_get_lm(struct drm_crtc
*crtc
)
645 struct mdp5_crtc
*mdp5_crtc
= to_mdp5_crtc(crtc
);
650 return mdp5_crtc
->lm
;
653 /* initialize crtc */
654 struct drm_crtc
*mdp5_crtc_init(struct drm_device
*dev
,
655 struct drm_plane
*plane
, int id
)
657 struct drm_crtc
*crtc
= NULL
;
658 struct mdp5_crtc
*mdp5_crtc
;
660 mdp5_crtc
= kzalloc(sizeof(*mdp5_crtc
), GFP_KERNEL
);
662 return ERR_PTR(-ENOMEM
);
664 crtc
= &mdp5_crtc
->base
;
667 mdp5_crtc
->lm
= GET_LM_ID(id
);
669 spin_lock_init(&mdp5_crtc
->lm_lock
);
670 spin_lock_init(&mdp5_crtc
->cursor
.lock
);
672 mdp5_crtc
->vblank
.irq
= mdp5_crtc_vblank_irq
;
673 mdp5_crtc
->err
.irq
= mdp5_crtc_err_irq
;
675 snprintf(mdp5_crtc
->name
, sizeof(mdp5_crtc
->name
), "%s:%d",
676 pipe2name(mdp5_plane_pipe(plane
)), id
);
678 drm_crtc_init_with_planes(dev
, crtc
, plane
, NULL
, &mdp5_crtc_funcs
);
680 drm_flip_work_init(&mdp5_crtc
->unref_cursor_work
,
681 "unref cursor", unref_cursor_worker
);
683 drm_crtc_helper_add(crtc
, &mdp5_crtc_helper_funcs
);
686 mdp5_plane_install_properties(plane
, &crtc
->base
);