MAINTAINERS: Add phy-miphy28lp.c and phy-miphy365x.c to ARCH/STI architecture
[deliverable/linux.git] / drivers / gpu / drm / msm / mdp / mdp5 / mdp5_crtc.c
1 /*
2 * Copyright (c) 2014 The Linux Foundation. All rights reserved.
3 * Copyright (C) 2013 Red Hat
4 * Author: Rob Clark <robdclark@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published by
8 * the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19 #include "mdp5_kms.h"
20
21 #include <linux/sort.h>
22 #include <drm/drm_mode.h>
23 #include "drm_crtc.h"
24 #include "drm_crtc_helper.h"
25 #include "drm_flip_work.h"
26
27 #define CURSOR_WIDTH 64
28 #define CURSOR_HEIGHT 64
29
30 #define SSPP_MAX (SSPP_RGB3 + 1) /* TODO: Add SSPP_MAX in mdp5.xml.h */
31
32 struct mdp5_crtc {
33 struct drm_crtc base;
34 char name[8];
35 int id;
36 bool enabled;
37
38 /* layer mixer used for this CRTC (+ its lock): */
39 #define GET_LM_ID(crtc_id) ((crtc_id == 3) ? 5 : crtc_id)
40 int lm;
41 spinlock_t lm_lock; /* protect REG_MDP5_LM_* registers */
42
43 /* CTL used for this CRTC: */
44 struct mdp5_ctl *ctl;
45
46 /* if there is a pending flip, these will be non-null: */
47 struct drm_pending_vblank_event *event;
48
49 #define PENDING_CURSOR 0x1
50 #define PENDING_FLIP 0x2
51 atomic_t pending;
52
53 /* for unref'ing cursor bo's after scanout completes: */
54 struct drm_flip_work unref_cursor_work;
55
56 struct mdp_irq vblank;
57 struct mdp_irq err;
58
59 struct {
60 /* protect REG_MDP5_LM_CURSOR* registers and cursor scanout_bo*/
61 spinlock_t lock;
62
63 /* current cursor being scanned out: */
64 struct drm_gem_object *scanout_bo;
65 uint32_t width;
66 uint32_t height;
67 } cursor;
68 };
69 #define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)
70
71 static struct mdp5_kms *get_kms(struct drm_crtc *crtc)
72 {
73 struct msm_drm_private *priv = crtc->dev->dev_private;
74 return to_mdp5_kms(to_mdp_kms(priv->kms));
75 }
76
77 static void request_pending(struct drm_crtc *crtc, uint32_t pending)
78 {
79 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
80
81 atomic_or(pending, &mdp5_crtc->pending);
82 mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
83 }
84
85 #define mdp5_lm_get_flush(lm) mdp_ctl_flush_mask_lm(lm)
86
87 static void crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
88 {
89 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
90
91 DBG("%s: flush=%08x", mdp5_crtc->name, flush_mask);
92 mdp5_ctl_commit(mdp5_crtc->ctl, flush_mask);
93 }
94
95 /*
96 * flush updates, to make sure hw is updated to new scanout fb,
97 * so that we can safely queue unref to current fb (ie. next
98 * vblank we know hw is done w/ previous scanout_fb).
99 */
100 static void crtc_flush_all(struct drm_crtc *crtc)
101 {
102 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
103 struct drm_plane *plane;
104 uint32_t flush_mask = 0;
105
106 /* we could have already released CTL in the disable path: */
107 if (!mdp5_crtc->ctl)
108 return;
109
110 drm_atomic_crtc_for_each_plane(plane, crtc) {
111 flush_mask |= mdp5_plane_get_flush(plane);
112 }
113 flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl);
114 flush_mask |= mdp5_lm_get_flush(mdp5_crtc->lm);
115
116 crtc_flush(crtc, flush_mask);
117 }
118
119 /* if file!=NULL, this is preclose potential cancel-flip path */
120 static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
121 {
122 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
123 struct drm_device *dev = crtc->dev;
124 struct drm_pending_vblank_event *event;
125 struct drm_plane *plane;
126 unsigned long flags;
127
128 spin_lock_irqsave(&dev->event_lock, flags);
129 event = mdp5_crtc->event;
130 if (event) {
131 /* if regular vblank case (!file) or if cancel-flip from
132 * preclose on file that requested flip, then send the
133 * event:
134 */
135 if (!file || (event->base.file_priv == file)) {
136 mdp5_crtc->event = NULL;
137 DBG("%s: send event: %p", mdp5_crtc->name, event);
138 drm_send_vblank_event(dev, mdp5_crtc->id, event);
139 }
140 }
141 spin_unlock_irqrestore(&dev->event_lock, flags);
142
143 drm_atomic_crtc_for_each_plane(plane, crtc) {
144 mdp5_plane_complete_flip(plane);
145 }
146 }
147
148 static void unref_cursor_worker(struct drm_flip_work *work, void *val)
149 {
150 struct mdp5_crtc *mdp5_crtc =
151 container_of(work, struct mdp5_crtc, unref_cursor_work);
152 struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);
153
154 msm_gem_put_iova(val, mdp5_kms->id);
155 drm_gem_object_unreference_unlocked(val);
156 }
157
158 static void mdp5_crtc_destroy(struct drm_crtc *crtc)
159 {
160 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
161
162 drm_crtc_cleanup(crtc);
163 drm_flip_work_cleanup(&mdp5_crtc->unref_cursor_work);
164
165 kfree(mdp5_crtc);
166 }
167
168 static bool mdp5_crtc_mode_fixup(struct drm_crtc *crtc,
169 const struct drm_display_mode *mode,
170 struct drm_display_mode *adjusted_mode)
171 {
172 return true;
173 }
174
175 /*
176 * blend_setup() - blend all the planes of a CRTC
177 *
178 * When border is enabled, the border color will ALWAYS be the base layer.
179 * Therefore, the first plane (private RGB pipe) will start at STAGE0.
180 * If disabled, the first plane starts at STAGE_BASE.
181 *
182 * Note:
183 * Border is not enabled here because the private plane is exactly
184 * the CRTC resolution.
185 */
186 static void blend_setup(struct drm_crtc *crtc)
187 {
188 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
189 struct mdp5_kms *mdp5_kms = get_kms(crtc);
190 struct drm_plane *plane;
191 const struct mdp5_cfg_hw *hw_cfg;
192 uint32_t lm = mdp5_crtc->lm, blend_cfg = 0;
193 unsigned long flags;
194 #define blender(stage) ((stage) - STAGE_BASE)
195
196 hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);
197
198 spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
199
200 /* ctl could be released already when we are shutting down: */
201 if (!mdp5_crtc->ctl)
202 goto out;
203
204 drm_atomic_crtc_for_each_plane(plane, crtc) {
205 enum mdp_mixer_stage_id stage =
206 to_mdp5_plane_state(plane->state)->stage;
207
208 /*
209 * Note: This cannot happen with current implementation but
210 * we need to check this condition once z property is added
211 */
212 BUG_ON(stage > hw_cfg->lm.nb_stages);
213
214 /* LM */
215 mdp5_write(mdp5_kms,
216 REG_MDP5_LM_BLEND_OP_MODE(lm, blender(stage)),
217 MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
218 MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST));
219 mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm,
220 blender(stage)), 0xff);
221 mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
222 blender(stage)), 0x00);
223 /* CTL */
224 blend_cfg |= mdp_ctl_blend_mask(mdp5_plane_pipe(plane), stage);
225 DBG("%s: blending pipe %s on stage=%d", mdp5_crtc->name,
226 pipe2name(mdp5_plane_pipe(plane)), stage);
227 }
228
229 DBG("%s: lm%d: blend config = 0x%08x", mdp5_crtc->name, lm, blend_cfg);
230 mdp5_ctl_blend(mdp5_crtc->ctl, lm, blend_cfg);
231
232 out:
233 spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
234 }
235
236 static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
237 {
238 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
239 struct mdp5_kms *mdp5_kms = get_kms(crtc);
240 unsigned long flags;
241 struct drm_display_mode *mode;
242
243 if (WARN_ON(!crtc->state))
244 return;
245
246 mode = &crtc->state->adjusted_mode;
247
248 DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
249 mdp5_crtc->name, mode->base.id, mode->name,
250 mode->vrefresh, mode->clock,
251 mode->hdisplay, mode->hsync_start,
252 mode->hsync_end, mode->htotal,
253 mode->vdisplay, mode->vsync_start,
254 mode->vsync_end, mode->vtotal,
255 mode->type, mode->flags);
256
257 spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
258 mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->lm),
259 MDP5_LM_OUT_SIZE_WIDTH(mode->hdisplay) |
260 MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));
261 spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
262 }
263
264 static void mdp5_crtc_disable(struct drm_crtc *crtc)
265 {
266 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
267 struct mdp5_kms *mdp5_kms = get_kms(crtc);
268
269 DBG("%s", mdp5_crtc->name);
270
271 if (WARN_ON(!mdp5_crtc->enabled))
272 return;
273
274 /* set STAGE_UNUSED for all layers */
275 mdp5_ctl_blend(mdp5_crtc->ctl, mdp5_crtc->lm, 0x00000000);
276
277 mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
278 mdp5_disable(mdp5_kms);
279
280 mdp5_crtc->enabled = false;
281 }
282
283 static void mdp5_crtc_enable(struct drm_crtc *crtc)
284 {
285 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
286 struct mdp5_kms *mdp5_kms = get_kms(crtc);
287
288 DBG("%s", mdp5_crtc->name);
289
290 if (WARN_ON(mdp5_crtc->enabled))
291 return;
292
293 mdp5_enable(mdp5_kms);
294 mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);
295
296 crtc_flush_all(crtc);
297
298 mdp5_crtc->enabled = true;
299 }
300
301 struct plane_state {
302 struct drm_plane *plane;
303 struct mdp5_plane_state *state;
304 };
305
306 static int pstate_cmp(const void *a, const void *b)
307 {
308 struct plane_state *pa = (struct plane_state *)a;
309 struct plane_state *pb = (struct plane_state *)b;
310 return pa->state->zpos - pb->state->zpos;
311 }
312
313 static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
314 struct drm_crtc_state *state)
315 {
316 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
317 struct mdp5_kms *mdp5_kms = get_kms(crtc);
318 struct drm_plane *plane;
319 struct drm_device *dev = crtc->dev;
320 struct plane_state pstates[STAGE3 + 1];
321 int cnt = 0, i;
322
323 DBG("%s: check", mdp5_crtc->name);
324
325 /* request a free CTL, if none is already allocated for this CRTC */
326 if (state->enable && !mdp5_crtc->ctl) {
327 mdp5_crtc->ctl = mdp5_ctlm_request(mdp5_kms->ctlm, crtc);
328 if (WARN_ON(!mdp5_crtc->ctl))
329 return -EINVAL;
330 }
331
332 /* verify that there are not too many planes attached to crtc
333 * and that we don't have conflicting mixer stages:
334 */
335 drm_atomic_crtc_state_for_each_plane(plane, state) {
336 struct drm_plane_state *pstate;
337
338 if (cnt >= ARRAY_SIZE(pstates)) {
339 dev_err(dev->dev, "too many planes!\n");
340 return -EINVAL;
341 }
342
343 pstate = state->state->plane_states[drm_plane_index(plane)];
344
345 /* plane might not have changed, in which case take
346 * current state:
347 */
348 if (!pstate)
349 pstate = plane->state;
350
351 pstates[cnt].plane = plane;
352 pstates[cnt].state = to_mdp5_plane_state(pstate);
353
354 cnt++;
355 }
356
357 sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
358
359 for (i = 0; i < cnt; i++) {
360 pstates[i].state->stage = STAGE_BASE + i;
361 DBG("%s: assign pipe %s on stage=%d", mdp5_crtc->name,
362 pipe2name(mdp5_plane_pipe(pstates[i].plane)),
363 pstates[i].state->stage);
364 }
365
366 return 0;
367 }
368
369 static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc)
370 {
371 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
372 DBG("%s: begin", mdp5_crtc->name);
373 }
374
375 static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc)
376 {
377 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
378 struct drm_device *dev = crtc->dev;
379 unsigned long flags;
380
381 DBG("%s: event: %p", mdp5_crtc->name, crtc->state->event);
382
383 WARN_ON(mdp5_crtc->event);
384
385 spin_lock_irqsave(&dev->event_lock, flags);
386 mdp5_crtc->event = crtc->state->event;
387 spin_unlock_irqrestore(&dev->event_lock, flags);
388
389 blend_setup(crtc);
390 crtc_flush_all(crtc);
391 request_pending(crtc, PENDING_FLIP);
392
393 if (mdp5_crtc->ctl && !crtc->state->enable) {
394 mdp5_ctl_release(mdp5_crtc->ctl);
395 mdp5_crtc->ctl = NULL;
396 }
397 }
398
399 static int mdp5_crtc_set_property(struct drm_crtc *crtc,
400 struct drm_property *property, uint64_t val)
401 {
402 // XXX
403 return -EINVAL;
404 }
405
406 static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
407 struct drm_file *file, uint32_t handle,
408 uint32_t width, uint32_t height)
409 {
410 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
411 struct drm_device *dev = crtc->dev;
412 struct mdp5_kms *mdp5_kms = get_kms(crtc);
413 struct drm_gem_object *cursor_bo, *old_bo;
414 uint32_t blendcfg, cursor_addr, stride;
415 int ret, bpp, lm;
416 unsigned int depth;
417 enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
418 uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
419 unsigned long flags;
420
421 if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
422 dev_err(dev->dev, "bad cursor size: %dx%d\n", width, height);
423 return -EINVAL;
424 }
425
426 if (NULL == mdp5_crtc->ctl)
427 return -EINVAL;
428
429 if (!handle) {
430 DBG("Cursor off");
431 return mdp5_ctl_set_cursor(mdp5_crtc->ctl, false);
432 }
433
434 cursor_bo = drm_gem_object_lookup(dev, file, handle);
435 if (!cursor_bo)
436 return -ENOENT;
437
438 ret = msm_gem_get_iova(cursor_bo, mdp5_kms->id, &cursor_addr);
439 if (ret)
440 return -EINVAL;
441
442 lm = mdp5_crtc->lm;
443 drm_fb_get_bpp_depth(DRM_FORMAT_ARGB8888, &depth, &bpp);
444 stride = width * (bpp >> 3);
445
446 spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
447 old_bo = mdp5_crtc->cursor.scanout_bo;
448
449 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
450 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
451 MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
452 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_IMG_SIZE(lm),
453 MDP5_LM_CURSOR_IMG_SIZE_SRC_H(height) |
454 MDP5_LM_CURSOR_IMG_SIZE_SRC_W(width));
455 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
456 MDP5_LM_CURSOR_SIZE_ROI_H(height) |
457 MDP5_LM_CURSOR_SIZE_ROI_W(width));
458 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm), cursor_addr);
459
460
461 blendcfg = MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_EN;
462 blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_TRANSP_EN;
463 blendcfg |= MDP5_LM_CURSOR_BLEND_CONFIG_BLEND_ALPHA_SEL(cur_alpha);
464 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BLEND_CONFIG(lm), blendcfg);
465
466 mdp5_crtc->cursor.scanout_bo = cursor_bo;
467 mdp5_crtc->cursor.width = width;
468 mdp5_crtc->cursor.height = height;
469 spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
470
471 ret = mdp5_ctl_set_cursor(mdp5_crtc->ctl, true);
472 if (ret)
473 goto end;
474
475 flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl);
476 crtc_flush(crtc, flush_mask);
477
478 end:
479 if (old_bo) {
480 drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo);
481 /* enable vblank to complete cursor work: */
482 request_pending(crtc, PENDING_CURSOR);
483 }
484 return ret;
485 }
486
487 static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
488 {
489 struct mdp5_kms *mdp5_kms = get_kms(crtc);
490 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
491 uint32_t flush_mask = mdp_ctl_flush_mask_cursor(0);
492 uint32_t xres = crtc->mode.hdisplay;
493 uint32_t yres = crtc->mode.vdisplay;
494 uint32_t roi_w;
495 uint32_t roi_h;
496 unsigned long flags;
497
498 x = (x > 0) ? x : 0;
499 y = (y > 0) ? y : 0;
500
501 /*
502 * Cursor Region Of Interest (ROI) is a plane read from cursor
503 * buffer to render. The ROI region is determined by the visiblity of
504 * the cursor point. In the default Cursor image the cursor point will
505 * be at the top left of the cursor image, unless it is specified
506 * otherwise using hotspot feature.
507 *
508 * If the cursor point reaches the right (xres - x < cursor.width) or
509 * bottom (yres - y < cursor.height) boundary of the screen, then ROI
510 * width and ROI height need to be evaluated to crop the cursor image
511 * accordingly.
512 * (xres-x) will be new cursor width when x > (xres - cursor.width)
513 * (yres-y) will be new cursor height when y > (yres - cursor.height)
514 */
515 roi_w = min(mdp5_crtc->cursor.width, xres - x);
516 roi_h = min(mdp5_crtc->cursor.height, yres - y);
517
518 spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
519 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(mdp5_crtc->lm),
520 MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
521 MDP5_LM_CURSOR_SIZE_ROI_W(roi_w));
522 mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(mdp5_crtc->lm),
523 MDP5_LM_CURSOR_START_XY_Y_START(y) |
524 MDP5_LM_CURSOR_START_XY_X_START(x));
525 spin_unlock_irqrestore(&mdp5_crtc->cursor.lock, flags);
526
527 crtc_flush(crtc, flush_mask);
528
529 return 0;
530 }
531
532 static const struct drm_crtc_funcs mdp5_crtc_funcs = {
533 .set_config = drm_atomic_helper_set_config,
534 .destroy = mdp5_crtc_destroy,
535 .page_flip = drm_atomic_helper_page_flip,
536 .set_property = mdp5_crtc_set_property,
537 .reset = drm_atomic_helper_crtc_reset,
538 .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
539 .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
540 .cursor_set = mdp5_crtc_cursor_set,
541 .cursor_move = mdp5_crtc_cursor_move,
542 };
543
544 static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
545 .mode_fixup = mdp5_crtc_mode_fixup,
546 .mode_set_nofb = mdp5_crtc_mode_set_nofb,
547 .prepare = mdp5_crtc_disable,
548 .commit = mdp5_crtc_enable,
549 .atomic_check = mdp5_crtc_atomic_check,
550 .atomic_begin = mdp5_crtc_atomic_begin,
551 .atomic_flush = mdp5_crtc_atomic_flush,
552 };
553
554 static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
555 {
556 struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
557 struct drm_crtc *crtc = &mdp5_crtc->base;
558 struct msm_drm_private *priv = crtc->dev->dev_private;
559 unsigned pending;
560
561 mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);
562
563 pending = atomic_xchg(&mdp5_crtc->pending, 0);
564
565 if (pending & PENDING_FLIP) {
566 complete_flip(crtc, NULL);
567 }
568
569 if (pending & PENDING_CURSOR)
570 drm_flip_work_commit(&mdp5_crtc->unref_cursor_work, priv->wq);
571 }
572
573 static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
574 {
575 struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);
576
577 DBG("%s: error: %08x", mdp5_crtc->name, irqstatus);
578 }
579
580 uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
581 {
582 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
583 return mdp5_crtc->vblank.irqmask;
584 }
585
586 void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
587 {
588 DBG("cancel: %p", file);
589 complete_flip(crtc, file);
590 }
591
592 /* set interface for routing crtc->encoder: */
593 void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
594 enum mdp5_intf intf_id)
595 {
596 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
597 struct mdp5_kms *mdp5_kms = get_kms(crtc);
598 uint32_t flush_mask = 0;
599 uint32_t intf_sel;
600 unsigned long flags;
601
602 /* now that we know what irq's we want: */
603 mdp5_crtc->err.irqmask = intf2err(intf);
604 mdp5_crtc->vblank.irqmask = intf2vblank(intf);
605 mdp_irq_update(&mdp5_kms->base);
606
607 spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
608 intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);
609
610 switch (intf) {
611 case 0:
612 intf_sel &= ~MDP5_DISP_INTF_SEL_INTF0__MASK;
613 intf_sel |= MDP5_DISP_INTF_SEL_INTF0(intf_id);
614 break;
615 case 1:
616 intf_sel &= ~MDP5_DISP_INTF_SEL_INTF1__MASK;
617 intf_sel |= MDP5_DISP_INTF_SEL_INTF1(intf_id);
618 break;
619 case 2:
620 intf_sel &= ~MDP5_DISP_INTF_SEL_INTF2__MASK;
621 intf_sel |= MDP5_DISP_INTF_SEL_INTF2(intf_id);
622 break;
623 case 3:
624 intf_sel &= ~MDP5_DISP_INTF_SEL_INTF3__MASK;
625 intf_sel |= MDP5_DISP_INTF_SEL_INTF3(intf_id);
626 break;
627 default:
628 BUG();
629 break;
630 }
631
632 mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
633 spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
634
635 DBG("%s: intf_sel=%08x", mdp5_crtc->name, intf_sel);
636 mdp5_ctl_set_intf(mdp5_crtc->ctl, intf);
637 flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl);
638 flush_mask |= mdp5_lm_get_flush(mdp5_crtc->lm);
639
640 crtc_flush(crtc, flush_mask);
641 }
642
643 int mdp5_crtc_get_lm(struct drm_crtc *crtc)
644 {
645 struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
646
647 if (WARN_ON(!crtc))
648 return -EINVAL;
649
650 return mdp5_crtc->lm;
651 }
652
653 /* initialize crtc */
654 struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
655 struct drm_plane *plane, int id)
656 {
657 struct drm_crtc *crtc = NULL;
658 struct mdp5_crtc *mdp5_crtc;
659
660 mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
661 if (!mdp5_crtc)
662 return ERR_PTR(-ENOMEM);
663
664 crtc = &mdp5_crtc->base;
665
666 mdp5_crtc->id = id;
667 mdp5_crtc->lm = GET_LM_ID(id);
668
669 spin_lock_init(&mdp5_crtc->lm_lock);
670 spin_lock_init(&mdp5_crtc->cursor.lock);
671
672 mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
673 mdp5_crtc->err.irq = mdp5_crtc_err_irq;
674
675 snprintf(mdp5_crtc->name, sizeof(mdp5_crtc->name), "%s:%d",
676 pipe2name(mdp5_plane_pipe(plane)), id);
677
678 drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs);
679
680 drm_flip_work_init(&mdp5_crtc->unref_cursor_work,
681 "unref cursor", unref_cursor_worker);
682
683 drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
684 plane->crtc = crtc;
685
686 mdp5_plane_install_properties(plane, &crtc->base);
687
688 return crtc;
689 }
This page took 0.04648 seconds and 5 git commands to generate.