drm/msm/mdp5: update irqs on crtc<->encoder link change
[deliverable/linux.git] / drivers / gpu / drm / msm / mdp / mdp5 / mdp5_crtc.c
CommitLineData
06c0dd96 1/*
0deed25b 2 * Copyright (c) 2014 The Linux Foundation. All rights reserved.
06c0dd96
RC
3 * Copyright (C) 2013 Red Hat
4 * Author: Rob Clark <robdclark@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published by
8 * the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include "mdp5_kms.h"
20
ed851963 21#include <linux/sort.h>
06c0dd96
RC
22#include <drm/drm_mode.h>
23#include "drm_crtc.h"
24#include "drm_crtc_helper.h"
25#include "drm_flip_work.h"
26
0deed25b
SV
27#define SSPP_MAX (SSPP_RGB3 + 1) /* TODO: Add SSPP_MAX in mdp5.xml.h */
28
06c0dd96
RC
29struct mdp5_crtc {
30 struct drm_crtc base;
31 char name[8];
06c0dd96
RC
32 int id;
33 bool enabled;
34
0deed25b
SV
35 /* layer mixer used for this CRTC (+ its lock): */
36#define GET_LM_ID(crtc_id) ((crtc_id == 3) ? 5 : crtc_id)
37 int lm;
38 spinlock_t lm_lock; /* protect REG_MDP5_LM_* registers */
39
40 /* CTL used for this CRTC: */
42238da8 41 struct mdp5_ctl *ctl;
06c0dd96
RC
42
43 /* if there is a pending flip, these will be non-null: */
44 struct drm_pending_vblank_event *event;
06c0dd96
RC
45
46#define PENDING_CURSOR 0x1
47#define PENDING_FLIP 0x2
48 atomic_t pending;
49
06c0dd96
RC
50 struct mdp_irq vblank;
51 struct mdp_irq err;
52};
53#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)
54
55static struct mdp5_kms *get_kms(struct drm_crtc *crtc)
56{
57 struct msm_drm_private *priv = crtc->dev->dev_private;
58 return to_mdp5_kms(to_mdp_kms(priv->kms));
59}
60
61static void request_pending(struct drm_crtc *crtc, uint32_t pending)
62{
63 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
64
65 atomic_or(pending, &mdp5_crtc->pending);
66 mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
67}
68
0deed25b
SV
69#define mdp5_lm_get_flush(lm) mdp_ctl_flush_mask_lm(lm)
70
71static void crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
72{
73 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
74
75 DBG("%s: flush=%08x", mdp5_crtc->name, flush_mask);
76 mdp5_ctl_commit(mdp5_crtc->ctl, flush_mask);
77}
78
79/*
80 * flush updates, to make sure hw is updated to new scanout fb,
81 * so that we can safely queue unref to current fb (ie. next
82 * vblank we know hw is done w/ previous scanout_fb).
83 */
84static void crtc_flush_all(struct drm_crtc *crtc)
06c0dd96
RC
85{
86 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
a8cecf33 87 struct drm_plane *plane;
0deed25b
SV
88 uint32_t flush_mask = 0;
89
90 /* we could have already released CTL in the disable path: */
91 if (!mdp5_crtc->ctl)
92 return;
06c0dd96 93
93b02beb 94 drm_atomic_crtc_for_each_plane(plane, crtc) {
0deed25b 95 flush_mask |= mdp5_plane_get_flush(plane);
06c0dd96 96 }
0deed25b
SV
97 flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl);
98 flush_mask |= mdp5_lm_get_flush(mdp5_crtc->lm);
a8cecf33 99
0deed25b 100 crtc_flush(crtc, flush_mask);
06c0dd96
RC
101}
102
06c0dd96
RC
103/* if file!=NULL, this is preclose potential cancel-flip path */
104static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
105{
106 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
107 struct drm_device *dev = crtc->dev;
108 struct drm_pending_vblank_event *event;
a8cecf33
RC
109 struct drm_plane *plane;
110 unsigned long flags;
06c0dd96
RC
111
112 spin_lock_irqsave(&dev->event_lock, flags);
113 event = mdp5_crtc->event;
114 if (event) {
115 /* if regular vblank case (!file) or if cancel-flip from
116 * preclose on file that requested flip, then send the
117 * event:
118 */
119 if (!file || (event->base.file_priv == file)) {
120 mdp5_crtc->event = NULL;
ed851963 121 DBG("%s: send event: %p", mdp5_crtc->name, event);
06c0dd96
RC
122 drm_send_vblank_event(dev, mdp5_crtc->id, event);
123 }
124 }
125 spin_unlock_irqrestore(&dev->event_lock, flags);
126
93b02beb 127 drm_atomic_crtc_for_each_plane(plane, crtc) {
a8cecf33 128 mdp5_plane_complete_flip(plane);
93b02beb 129 }
06c0dd96
RC
130}
131
06c0dd96
RC
/* Tear down the CRTC and free the driver-private wrapper. */
static void mdp5_crtc_destroy(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	drm_crtc_cleanup(crtc);

	kfree(mdp5_crtc);
}
140
141static void mdp5_crtc_dpms(struct drm_crtc *crtc, int mode)
142{
143 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
144 struct mdp5_kms *mdp5_kms = get_kms(crtc);
145 bool enabled = (mode == DRM_MODE_DPMS_ON);
146
147 DBG("%s: mode=%d", mdp5_crtc->name, mode);
148
149 if (enabled != mdp5_crtc->enabled) {
150 if (enabled) {
151 mdp5_enable(mdp5_kms);
152 mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);
153 } else {
d6ac4a84
RC
154 /* set STAGE_UNUSED for all layers */
155 mdp5_ctl_blend(mdp5_crtc->ctl, mdp5_crtc->lm, 0x00000000);
06c0dd96
RC
156 mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
157 mdp5_disable(mdp5_kms);
158 }
159 mdp5_crtc->enabled = enabled;
160 }
161}
162
163static bool mdp5_crtc_mode_fixup(struct drm_crtc *crtc,
164 const struct drm_display_mode *mode,
165 struct drm_display_mode *adjusted_mode)
166{
167 return true;
168}
169
0deed25b
SV
170/*
171 * blend_setup() - blend all the planes of a CRTC
172 *
173 * When border is enabled, the border color will ALWAYS be the base layer.
174 * Therefore, the first plane (private RGB pipe) will start at STAGE0.
175 * If disabled, the first plane starts at STAGE_BASE.
176 *
177 * Note:
178 * Border is not enabled here because the private plane is exactly
179 * the CRTC resolution.
180 */
06c0dd96
RC
181static void blend_setup(struct drm_crtc *crtc)
182{
183 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
184 struct mdp5_kms *mdp5_kms = get_kms(crtc);
0deed25b
SV
185 struct drm_plane *plane;
186 const struct mdp5_cfg_hw *hw_cfg;
187 uint32_t lm = mdp5_crtc->lm, blend_cfg = 0;
0deed25b
SV
188 unsigned long flags;
189#define blender(stage) ((stage) - STAGE_BASE)
06c0dd96 190
42238da8 191 hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
06c0dd96 192
0deed25b
SV
193 spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
194
195 /* ctl could be released already when we are shutting down: */
196 if (!mdp5_crtc->ctl)
197 goto out;
198
93b02beb 199 drm_atomic_crtc_for_each_plane(plane, crtc) {
ed851963
RC
200 enum mdp_mixer_stage_id stage =
201 to_mdp5_plane_state(plane->state)->stage;
06c0dd96 202
0deed25b
SV
203 /*
204 * Note: This cannot happen with current implementation but
205 * we need to check this condition once z property is added
206 */
207 BUG_ON(stage > hw_cfg->lm.nb_stages);
208
209 /* LM */
210 mdp5_write(mdp5_kms,
211 REG_MDP5_LM_BLEND_OP_MODE(lm, blender(stage)),
212 MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
213 MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST));
214 mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm,
215 blender(stage)), 0xff);
216 mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
217 blender(stage)), 0x00);
218 /* CTL */
219 blend_cfg |= mdp_ctl_blend_mask(mdp5_plane_pipe(plane), stage);
220 DBG("%s: blending pipe %s on stage=%d", mdp5_crtc->name,
221 pipe2name(mdp5_plane_pipe(plane)), stage);
222 }
223
224 DBG("%s: lm%d: blend config = 0x%08x", mdp5_crtc->name, lm, blend_cfg);
225 mdp5_ctl_blend(mdp5_crtc->ctl, lm, blend_cfg);
226
227out:
228 spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
06c0dd96
RC
229}
230
ed851963 231static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
06c0dd96
RC
232{
233 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
234 struct mdp5_kms *mdp5_kms = get_kms(crtc);
0deed25b 235 unsigned long flags;
ed851963
RC
236 struct drm_display_mode *mode;
237
238 if (WARN_ON(!crtc->state))
239 return;
06c0dd96 240
ed851963 241 mode = &crtc->state->adjusted_mode;
06c0dd96
RC
242
243 DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
244 mdp5_crtc->name, mode->base.id, mode->name,
245 mode->vrefresh, mode->clock,
246 mode->hdisplay, mode->hsync_start,
247 mode->hsync_end, mode->htotal,
248 mode->vdisplay, mode->vsync_start,
249 mode->vsync_end, mode->vtotal,
250 mode->type, mode->flags);
251
0deed25b
SV
252 spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
253 mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->lm),
06c0dd96
RC
254 MDP5_LM_OUT_SIZE_WIDTH(mode->hdisplay) |
255 MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));
0deed25b 256 spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
06c0dd96
RC
257}
258
259static void mdp5_crtc_prepare(struct drm_crtc *crtc)
260{
261 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
262 DBG("%s", mdp5_crtc->name);
263 /* make sure we hold a ref to mdp clks while setting up mode: */
264 mdp5_enable(get_kms(crtc));
265 mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
266}
267
268static void mdp5_crtc_commit(struct drm_crtc *crtc)
269{
ed851963
RC
270 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
271 DBG("%s", mdp5_crtc->name);
06c0dd96 272 mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
0deed25b 273 crtc_flush_all(crtc);
06c0dd96
RC
274 /* drop the ref to mdp clk's that we got in prepare: */
275 mdp5_disable(get_kms(crtc));
276}
277
/* Gamma LUT programming is not implemented for MDP5. */
static void mdp5_crtc_load_lut(struct drm_crtc *crtc)
{
}
281
ed851963
RC
/* Pair of plane + its mdp5 state, used for zpos sorting in atomic_check. */
struct plane_state {
	struct drm_plane *plane;
	struct mdp5_plane_state *state;
};
286
287static int pstate_cmp(const void *a, const void *b)
06c0dd96 288{
ed851963
RC
289 struct plane_state *pa = (struct plane_state *)a;
290 struct plane_state *pb = (struct plane_state *)b;
291 return pa->state->zpos - pb->state->zpos;
06c0dd96
RC
292}
293
ed851963
RC
294static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
295 struct drm_crtc_state *state)
0deed25b
SV
296{
297 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
ed851963
RC
298 struct mdp5_kms *mdp5_kms = get_kms(crtc);
299 struct drm_plane *plane;
300 struct drm_device *dev = crtc->dev;
301 struct plane_state pstates[STAGE3 + 1];
302 int cnt = 0, i;
0deed25b 303
ed851963 304 DBG("%s: check", mdp5_crtc->name);
0deed25b 305
ed851963
RC
306 /* request a free CTL, if none is already allocated for this CRTC */
307 if (state->enable && !mdp5_crtc->ctl) {
308 mdp5_crtc->ctl = mdp5_ctlm_request(mdp5_kms->ctlm, crtc);
309 if (WARN_ON(!mdp5_crtc->ctl))
310 return -EINVAL;
0deed25b 311 }
ed851963
RC
312
313 /* verify that there are not too many planes attached to crtc
314 * and that we don't have conflicting mixer stages:
315 */
93b02beb 316 drm_atomic_crtc_state_for_each_plane(plane, state) {
ed851963
RC
317 struct drm_plane_state *pstate;
318
319 if (cnt >= ARRAY_SIZE(pstates)) {
320 dev_err(dev->dev, "too many planes!\n");
321 return -EINVAL;
322 }
323
324 pstate = state->state->plane_states[drm_plane_index(plane)];
325
326 /* plane might not have changed, in which case take
327 * current state:
328 */
329 if (!pstate)
330 pstate = plane->state;
331
332 pstates[cnt].plane = plane;
333 pstates[cnt].state = to_mdp5_plane_state(pstate);
334
335 cnt++;
336 }
337
338 sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
339
340 for (i = 0; i < cnt; i++) {
341 pstates[i].state->stage = STAGE_BASE + i;
342 DBG("%s: assign pipe %s on stage=%d", mdp5_crtc->name,
343 pipe2name(mdp5_plane_pipe(pstates[i].plane)),
344 pstates[i].state->stage);
345 }
346
347 return 0;
0deed25b
SV
348}
349
ed851963
RC
350static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc)
351{
352 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
353 DBG("%s: begin", mdp5_crtc->name);
354}
0deed25b 355
ed851963 356static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc)
06c0dd96
RC
357{
358 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
359 struct drm_device *dev = crtc->dev;
06c0dd96
RC
360 unsigned long flags;
361
f86afecf 362 DBG("%s: event: %p", mdp5_crtc->name, crtc->state->event);
06c0dd96 363
ed851963 364 WARN_ON(mdp5_crtc->event);
06c0dd96
RC
365
366 spin_lock_irqsave(&dev->event_lock, flags);
ed851963 367 mdp5_crtc->event = crtc->state->event;
06c0dd96
RC
368 spin_unlock_irqrestore(&dev->event_lock, flags);
369
ed851963
RC
370 blend_setup(crtc);
371 crtc_flush_all(crtc);
372 request_pending(crtc, PENDING_FLIP);
06c0dd96 373
ed851963
RC
374 if (mdp5_crtc->ctl && !crtc->state->enable) {
375 mdp5_ctl_release(mdp5_crtc->ctl);
376 mdp5_crtc->ctl = NULL;
377 }
06c0dd96
RC
378}
379
380static int mdp5_crtc_set_property(struct drm_crtc *crtc,
381 struct drm_property *property, uint64_t val)
382{
383 // XXX
384 return -EINVAL;
385}
386
387static const struct drm_crtc_funcs mdp5_crtc_funcs = {
ed851963 388 .set_config = drm_atomic_helper_set_config,
06c0dd96 389 .destroy = mdp5_crtc_destroy,
ed851963 390 .page_flip = drm_atomic_helper_page_flip,
06c0dd96 391 .set_property = mdp5_crtc_set_property,
ed851963
RC
392 .reset = drm_atomic_helper_crtc_reset,
393 .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
394 .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
06c0dd96
RC
395};
396
397static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
398 .dpms = mdp5_crtc_dpms,
399 .mode_fixup = mdp5_crtc_mode_fixup,
ed851963
RC
400 .mode_set_nofb = mdp5_crtc_mode_set_nofb,
401 .mode_set = drm_helper_crtc_mode_set,
402 .mode_set_base = drm_helper_crtc_mode_set_base,
06c0dd96
RC
403 .prepare = mdp5_crtc_prepare,
404 .commit = mdp5_crtc_commit,
06c0dd96 405 .load_lut = mdp5_crtc_load_lut,
ed851963
RC
406 .atomic_check = mdp5_crtc_atomic_check,
407 .atomic_begin = mdp5_crtc_atomic_begin,
408 .atomic_flush = mdp5_crtc_atomic_flush,
06c0dd96
RC
409};
410
411static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
412{
413 struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
414 struct drm_crtc *crtc = &mdp5_crtc->base;
06c0dd96
RC
415 unsigned pending;
416
417 mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);
418
419 pending = atomic_xchg(&mdp5_crtc->pending, 0);
420
421 if (pending & PENDING_FLIP) {
422 complete_flip(crtc, NULL);
06c0dd96
RC
423 }
424}
425
426static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
427{
428 struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);
0deed25b 429
06c0dd96 430 DBG("%s: error: %08x", mdp5_crtc->name, irqstatus);
06c0dd96
RC
431}
432
433uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
434{
435 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
436 return mdp5_crtc->vblank.irqmask;
437}
438
/* Cancel a pending flip on behalf of a closing file (preclose path). */
void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
{
	DBG("cancel: %p", file);
	complete_flip(crtc, file);
}
444
445/* set interface for routing crtc->encoder: */
446void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
447 enum mdp5_intf intf_id)
448{
449 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
450 struct mdp5_kms *mdp5_kms = get_kms(crtc);
0deed25b 451 uint32_t flush_mask = 0;
06c0dd96 452 uint32_t intf_sel;
0deed25b 453 unsigned long flags;
06c0dd96
RC
454
455 /* now that we know what irq's we want: */
456 mdp5_crtc->err.irqmask = intf2err(intf);
457 mdp5_crtc->vblank.irqmask = intf2vblank(intf);
8bc1fe92 458 mdp_irq_update(&mdp5_kms->base);
06c0dd96 459
0deed25b 460 spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
06c0dd96
RC
461 intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);
462
463 switch (intf) {
464 case 0:
465 intf_sel &= ~MDP5_DISP_INTF_SEL_INTF0__MASK;
466 intf_sel |= MDP5_DISP_INTF_SEL_INTF0(intf_id);
467 break;
468 case 1:
469 intf_sel &= ~MDP5_DISP_INTF_SEL_INTF1__MASK;
470 intf_sel |= MDP5_DISP_INTF_SEL_INTF1(intf_id);
471 break;
472 case 2:
473 intf_sel &= ~MDP5_DISP_INTF_SEL_INTF2__MASK;
474 intf_sel |= MDP5_DISP_INTF_SEL_INTF2(intf_id);
475 break;
476 case 3:
477 intf_sel &= ~MDP5_DISP_INTF_SEL_INTF3__MASK;
478 intf_sel |= MDP5_DISP_INTF_SEL_INTF3(intf_id);
479 break;
480 default:
481 BUG();
482 break;
483 }
484
0deed25b
SV
485 mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
486 spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
06c0dd96
RC
487
488 DBG("%s: intf_sel=%08x", mdp5_crtc->name, intf_sel);
0deed25b
SV
489 mdp5_ctl_set_intf(mdp5_crtc->ctl, intf);
490 flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl);
491 flush_mask |= mdp5_lm_get_flush(mdp5_crtc->lm);
06c0dd96 492
0deed25b
SV
493 crtc_flush(crtc, flush_mask);
494}
06c0dd96 495
0deed25b
SV
496int mdp5_crtc_get_lm(struct drm_crtc *crtc)
497{
498 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
499
500 if (WARN_ON(!crtc))
501 return -EINVAL;
502
503 return mdp5_crtc->lm;
504}
505
06c0dd96
RC
506/* initialize crtc */
507struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
508 struct drm_plane *plane, int id)
509{
510 struct drm_crtc *crtc = NULL;
511 struct mdp5_crtc *mdp5_crtc;
06c0dd96
RC
512
513 mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
d7f8db53
BB
514 if (!mdp5_crtc)
515 return ERR_PTR(-ENOMEM);
06c0dd96
RC
516
517 crtc = &mdp5_crtc->base;
518
06c0dd96 519 mdp5_crtc->id = id;
0deed25b
SV
520 mdp5_crtc->lm = GET_LM_ID(id);
521
522 spin_lock_init(&mdp5_crtc->lm_lock);
06c0dd96
RC
523
524 mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
525 mdp5_crtc->err.irq = mdp5_crtc_err_irq;
526
527 snprintf(mdp5_crtc->name, sizeof(mdp5_crtc->name), "%s:%d",
528 pipe2name(mdp5_plane_pipe(plane)), id);
529
2d82d188 530 drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs);
06c0dd96 531 drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
a8cecf33 532 plane->crtc = crtc;
06c0dd96 533
8845ef80 534 mdp5_plane_install_properties(plane, &crtc->base);
06c0dd96
RC
535
536 return crtc;
06c0dd96 537}
This page took 0.147764 seconds and 5 git commands to generate.