drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
/*
 * Copyright (c) 2014 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "mdp5_kms.h"

#include <linux/sort.h>
#include <drm/drm_mode.h>
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include "drm_flip_work.h"

#define SSPP_MAX	(SSPP_RGB3 + 1) /* TODO: Add SSPP_MAX in mdp5.xml.h */

struct mdp5_crtc {
	struct drm_crtc base;
	char name[8];
	int id;
	bool enabled;

	/* layer mixer used for this CRTC (+ its lock): */
#define GET_LM_ID(crtc_id)	((crtc_id == 3) ? 5 : crtc_id)
	int lm;
	spinlock_t lm_lock;	/* protect REG_MDP5_LM_* registers */

	/* CTL used for this CRTC: */
	struct mdp5_ctl *ctl;

	/* if there is a pending flip, these will be non-null: */
	struct drm_pending_vblank_event *event;

#define PENDING_CURSOR 0x1
#define PENDING_FLIP   0x2
	atomic_t pending;

	struct mdp_irq vblank;
	struct mdp_irq err;
};
#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)

static struct mdp5_kms *get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;
	return to_mdp5_kms(to_mdp_kms(priv->kms));
}

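/*
 * Flag pending work (PENDING_CURSOR / PENDING_FLIP) and arm the vblank irq;
 * the bits are consumed in mdp5_crtc_vblank_irq() at the next vblank (only
 * PENDING_FLIP currently triggers work there, via complete_flip()).
 */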
static void request_pending(struct drm_crtc *crtc, uint32_t pending)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	atomic_or(pending, &mdp5_crtc->pending);
	mdp_irq_register(&get_kms(crtc)->base, &mdp5_crtc->vblank);
}

#define mdp5_lm_get_flush(lm)	mdp_ctl_flush_mask_lm(lm)

static void crtc_flush(struct drm_crtc *crtc, u32 flush_mask)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	DBG("%s: flush=%08x", mdp5_crtc->name, flush_mask);
	mdp5_ctl_commit(mdp5_crtc->ctl, flush_mask);
}

/*
 * flush updates, to make sure hw is updated to new scanout fb,
 * so that we can safely queue unref to current fb (ie. next
 * vblank we know hw is done w/ previous scanout_fb).
 */
static void crtc_flush_all(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct drm_plane *plane;
	uint32_t flush_mask = 0;

	/* we could have already released CTL in the disable path: */
	if (!mdp5_crtc->ctl)
		return;

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		flush_mask |= mdp5_plane_get_flush(plane);
	}
	flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl);
	flush_mask |= mdp5_lm_get_flush(mdp5_crtc->lm);

	crtc_flush(crtc, flush_mask);
}

/* if file!=NULL, this is preclose potential cancel-flip path */
static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_pending_vblank_event *event;
	struct drm_plane *plane;
	unsigned long flags;

	spin_lock_irqsave(&dev->event_lock, flags);
	event = mdp5_crtc->event;
	if (event) {
		/* if regular vblank case (!file) or if cancel-flip from
		 * preclose on file that requested flip, then send the
		 * event:
		 */
		if (!file || (event->base.file_priv == file)) {
			mdp5_crtc->event = NULL;
			DBG("%s: send event: %p", mdp5_crtc->name, event);
			drm_send_vblank_event(dev, mdp5_crtc->id, event);
		}
	}
	spin_unlock_irqrestore(&dev->event_lock, flags);

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		mdp5_plane_complete_flip(plane);
	}
}

static void mdp5_crtc_destroy(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	drm_crtc_cleanup(crtc);

	kfree(mdp5_crtc);
}

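/*
 * DPMS gates the MDP clocks and this CRTC's error irq: on DPMS_ON the
 * clocks are enabled and the error irq registered; on DPMS_OFF all mixer
 * stages are first parked at STAGE_UNUSED (blend_cfg = 0) before the irq
 * is unregistered and the clock reference dropped.
 */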
static void mdp5_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	bool enabled = (mode == DRM_MODE_DPMS_ON);

	DBG("%s: mode=%d", mdp5_crtc->name, mode);

	if (enabled != mdp5_crtc->enabled) {
		if (enabled) {
			mdp5_enable(mdp5_kms);
			mdp_irq_register(&mdp5_kms->base, &mdp5_crtc->err);
		} else {
			/* set STAGE_UNUSED for all layers */
			mdp5_ctl_blend(mdp5_crtc->ctl, mdp5_crtc->lm, 0x00000000);
			mdp_irq_unregister(&mdp5_kms->base, &mdp5_crtc->err);
			mdp5_disable(mdp5_kms);
		}
		mdp5_crtc->enabled = enabled;
	}
}

static bool mdp5_crtc_mode_fixup(struct drm_crtc *crtc,
		const struct drm_display_mode *mode,
		struct drm_display_mode *adjusted_mode)
{
	return true;
}

/*
 * blend_setup() - blend all the planes of a CRTC
 *
 * When border is enabled, the border color will ALWAYS be the base layer.
 * Therefore, the first plane (private RGB pipe) will start at STAGE0.
 * If disabled, the first plane starts at STAGE_BASE.
 *
 * Note:
 * Border is not enabled here because the private plane is exactly
 * the CRTC resolution.
 */
static void blend_setup(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct drm_plane *plane;
	const struct mdp5_cfg_hw *hw_cfg;
	uint32_t lm = mdp5_crtc->lm, blend_cfg = 0;
	unsigned long flags;
#define blender(stage)	((stage) - STAGE_BASE)

	hw_cfg = mdp5_cfg_get_hw_config(mdp5_kms->cfg);

	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);

	/* ctl could be released already when we are shutting down: */
	if (!mdp5_crtc->ctl)
		goto out;

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		enum mdp_mixer_stage_id stage =
			to_mdp5_plane_state(plane->state)->stage;

		/*
		 * Note: This cannot happen with current implementation but
		 * we need to check this condition once z property is added
		 */
		BUG_ON(stage > hw_cfg->lm.nb_stages);

		/* LM */
		mdp5_write(mdp5_kms,
				REG_MDP5_LM_BLEND_OP_MODE(lm, blender(stage)),
				MDP5_LM_BLEND_OP_MODE_FG_ALPHA(FG_CONST) |
				MDP5_LM_BLEND_OP_MODE_BG_ALPHA(BG_CONST));
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_FG_ALPHA(lm,
				blender(stage)), 0xff);
		mdp5_write(mdp5_kms, REG_MDP5_LM_BLEND_BG_ALPHA(lm,
				blender(stage)), 0x00);
		/* CTL */
		blend_cfg |= mdp_ctl_blend_mask(mdp5_plane_pipe(plane), stage);
		DBG("%s: blending pipe %s on stage=%d", mdp5_crtc->name,
				pipe2name(mdp5_plane_pipe(plane)), stage);
	}

	DBG("%s: lm%d: blend config = 0x%08x", mdp5_crtc->name, lm, blend_cfg);
	mdp5_ctl_blend(mdp5_crtc->ctl, lm, blend_cfg);

out:
	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}

static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	unsigned long flags;
	struct drm_display_mode *mode;

	if (WARN_ON(!crtc->state))
		return;

	mode = &crtc->state->adjusted_mode;

	DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
			mdp5_crtc->name, mode->base.id, mode->name,
			mode->vrefresh, mode->clock,
			mode->hdisplay, mode->hsync_start,
			mode->hsync_end, mode->htotal,
			mode->vdisplay, mode->vsync_start,
			mode->vsync_end, mode->vtotal,
			mode->type, mode->flags);

	spin_lock_irqsave(&mdp5_crtc->lm_lock, flags);
	mdp5_write(mdp5_kms, REG_MDP5_LM_OUT_SIZE(mdp5_crtc->lm),
			MDP5_LM_OUT_SIZE_WIDTH(mode->hdisplay) |
			MDP5_LM_OUT_SIZE_HEIGHT(mode->vdisplay));
	spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}

static void mdp5_crtc_prepare(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	DBG("%s", mdp5_crtc->name);
	/* make sure we hold a ref to mdp clks while setting up mode: */
	mdp5_enable(get_kms(crtc));
	mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}

static void mdp5_crtc_commit(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	DBG("%s", mdp5_crtc->name);
	mdp5_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
	crtc_flush_all(crtc);
	/* drop the ref to mdp clk's that we got in prepare: */
	mdp5_disable(get_kms(crtc));
}

static void mdp5_crtc_load_lut(struct drm_crtc *crtc)
{
}

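/*
 * Helpers for mdp5_crtc_atomic_check(): the CRTC's plane states are
 * collected into an array, sorted by zpos, and then assigned layer mixer
 * stages bottom-up starting from STAGE_BASE.
 */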
struct plane_state {
	struct drm_plane *plane;
	struct mdp5_plane_state *state;
};

static int pstate_cmp(const void *a, const void *b)
{
	struct plane_state *pa = (struct plane_state *)a;
	struct plane_state *pb = (struct plane_state *)b;
	return pa->state->zpos - pb->state->zpos;
}

static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
		struct drm_crtc_state *state)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	struct drm_plane *plane;
	struct drm_device *dev = crtc->dev;
	struct plane_state pstates[STAGE3 + 1];
	int cnt = 0, i;

	DBG("%s: check", mdp5_crtc->name);

	/* request a free CTL, if none is already allocated for this CRTC */
	if (state->enable && !mdp5_crtc->ctl) {
		mdp5_crtc->ctl = mdp5_ctlm_request(mdp5_kms->ctlm, crtc);
		if (WARN_ON(!mdp5_crtc->ctl))
			return -EINVAL;
	}

	/* verify that there are not too many planes attached to crtc
	 * and that we don't have conflicting mixer stages:
	 */
	drm_atomic_crtc_state_for_each_plane(plane, state) {
		struct drm_plane_state *pstate;

		if (cnt >= ARRAY_SIZE(pstates)) {
			dev_err(dev->dev, "too many planes!\n");
			return -EINVAL;
		}

		pstate = state->state->plane_states[drm_plane_index(plane)];

		/* plane might not have changed, in which case take
		 * current state:
		 */
		if (!pstate)
			pstate = plane->state;

		pstates[cnt].plane = plane;
		pstates[cnt].state = to_mdp5_plane_state(pstate);

		cnt++;
	}

	sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);

	for (i = 0; i < cnt; i++) {
		pstates[i].state->stage = STAGE_BASE + i;
		DBG("%s: assign pipe %s on stage=%d", mdp5_crtc->name,
				pipe2name(mdp5_plane_pipe(pstates[i].plane)),
				pstates[i].state->stage);
	}

	return 0;
}

static void mdp5_crtc_atomic_begin(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	DBG("%s: begin", mdp5_crtc->name);
}

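/*
 * atomic_flush: stash the completion event under event_lock, program the
 * blend configuration, kick the CTL/LM flush and arm PENDING_FLIP so the
 * event gets sent from the vblank irq.  The CTL is handed back to the
 * manager here once the CRTC has been disabled.
 */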
static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	DBG("%s: event: %p", mdp5_crtc->name, crtc->state->event);

	WARN_ON(mdp5_crtc->event);

	spin_lock_irqsave(&dev->event_lock, flags);
	mdp5_crtc->event = crtc->state->event;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	blend_setup(crtc);
	crtc_flush_all(crtc);
	request_pending(crtc, PENDING_FLIP);

	if (mdp5_crtc->ctl && !crtc->state->enable) {
		mdp5_ctl_release(mdp5_crtc->ctl);
		mdp5_crtc->ctl = NULL;
	}
}

static int mdp5_crtc_set_property(struct drm_crtc *crtc,
		struct drm_property *property, uint64_t val)
{
	// XXX
	return -EINVAL;
}

static const struct drm_crtc_funcs mdp5_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = mdp5_crtc_destroy,
	.page_flip = drm_atomic_helper_page_flip,
	.set_property = mdp5_crtc_set_property,
	.reset = drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
};

static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
	.dpms = mdp5_crtc_dpms,
	.mode_fixup = mdp5_crtc_mode_fixup,
	.mode_set_nofb = mdp5_crtc_mode_set_nofb,
	.mode_set = drm_helper_crtc_mode_set,
	.mode_set_base = drm_helper_crtc_mode_set_base,
	.prepare = mdp5_crtc_prepare,
	.commit = mdp5_crtc_commit,
	.load_lut = mdp5_crtc_load_lut,
	.atomic_check = mdp5_crtc_atomic_check,
	.atomic_begin = mdp5_crtc_atomic_begin,
	.atomic_flush = mdp5_crtc_atomic_flush,
};

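/*
 * One-shot vblank handler: request_pending() registers it, it unregisters
 * itself again, then completes whatever work was flagged in 'pending'
 * (currently only PENDING_FLIP is acted upon).
 */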
static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, vblank);
	struct drm_crtc *crtc = &mdp5_crtc->base;
	unsigned pending;

	mdp_irq_unregister(&get_kms(crtc)->base, &mdp5_crtc->vblank);

	pending = atomic_xchg(&mdp5_crtc->pending, 0);

	if (pending & PENDING_FLIP) {
		complete_flip(crtc, NULL);
	}
}

static void mdp5_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, err);

	DBG("%s: error: %08x", mdp5_crtc->name, irqstatus);
}

uint32_t mdp5_crtc_vblank(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	return mdp5_crtc->vblank.irqmask;
}

void mdp5_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file)
{
	DBG("cancel: %p", file);
	complete_flip(crtc, file);
}

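/*
 * Note: DISP_INTF_SEL is shared by all CRTCs, so the read/modify/write
 * below is done under mdp5_kms->resource_lock; the intf number also
 * selects which error and vblank irq bits this CRTC listens to.
 */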
/* set interface for routing crtc->encoder: */
void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
		enum mdp5_intf intf_id)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
	struct mdp5_kms *mdp5_kms = get_kms(crtc);
	uint32_t flush_mask = 0;
	uint32_t intf_sel;
	unsigned long flags;

	/* now that we know what irq's we want: */
	mdp5_crtc->err.irqmask = intf2err(intf);
	mdp5_crtc->vblank.irqmask = intf2vblank(intf);

	/* when called from modeset_init(), skip the rest until later: */
	if (!mdp5_kms)
		return;

	spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
	intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);

	switch (intf) {
	case 0:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF0__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF0(intf_id);
		break;
	case 1:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF1__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF1(intf_id);
		break;
	case 2:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF2__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF2(intf_id);
		break;
	case 3:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF3__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF3(intf_id);
		break;
	default:
		BUG();
		break;
	}

	mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
	spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);

	DBG("%s: intf_sel=%08x", mdp5_crtc->name, intf_sel);
	mdp5_ctl_set_intf(mdp5_crtc->ctl, intf);
	flush_mask |= mdp5_ctl_get_flush(mdp5_crtc->ctl);
	flush_mask |= mdp5_lm_get_flush(mdp5_crtc->lm);

	crtc_flush(crtc, flush_mask);
}

int mdp5_crtc_get_lm(struct drm_crtc *crtc)
{
	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);

	if (WARN_ON(!crtc))
		return -EINVAL;

	return mdp5_crtc->lm;
}

/* initialize crtc */
struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
		struct drm_plane *plane, int id)
{
	struct drm_crtc *crtc = NULL;
	struct mdp5_crtc *mdp5_crtc;

	mdp5_crtc = kzalloc(sizeof(*mdp5_crtc), GFP_KERNEL);
	if (!mdp5_crtc)
		return ERR_PTR(-ENOMEM);

	crtc = &mdp5_crtc->base;

	mdp5_crtc->id = id;
	mdp5_crtc->lm = GET_LM_ID(id);

	spin_lock_init(&mdp5_crtc->lm_lock);

	mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
	mdp5_crtc->err.irq = mdp5_crtc_err_irq;

	snprintf(mdp5_crtc->name, sizeof(mdp5_crtc->name), "%s:%d",
			pipe2name(mdp5_plane_pipe(plane)), id);

	drm_crtc_init_with_planes(dev, crtc, plane, NULL, &mdp5_crtc_funcs);
	drm_crtc_helper_add(crtc, &mdp5_crtc_helper_funcs);
	plane->crtc = crtc;

	mdp5_plane_install_properties(plane, &crtc->base);

	return crtc;
}