drm: rcar-du: Restart the DU group when a plane source changes
drivers/gpu/drm/rcar-du/rcar_du_kms.c

/*
 * rcar_du_kms.c -- R-Car Display Unit Mode Setting
 *
 * Copyright (C) 2013-2014 Renesas Electronics Corporation
 *
 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>

#include <linux/of_graph.h>
#include <linux/wait.h>

#include "rcar_du_crtc.h"
#include "rcar_du_drv.h"
#include "rcar_du_encoder.h"
#include "rcar_du_kms.h"
#include "rcar_du_lvdsenc.h"
#include "rcar_du_regs.h"

/* -----------------------------------------------------------------------------
 * Format helpers
 */

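/*
 * Note: .bpp is the average number of bits per pixel over all planes (e.g. 12
 * for NV12); as the macro prefixes suggest, .pnmr and .edf carry the
 * per-format values for the PnMR plane mode register and the EDF field of
 * PnDDCR4.
 */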
static const struct rcar_du_format_info rcar_du_format_infos[] = {
	{
		.fourcc = DRM_FORMAT_RGB565,
		.bpp = 16,
		.planes = 1,
		.pnmr = PnMR_SPIM_TP | PnMR_DDDF_16BPP,
		.edf = PnDDCR4_EDF_NONE,
	}, {
		.fourcc = DRM_FORMAT_ARGB1555,
		.bpp = 16,
		.planes = 1,
		.pnmr = PnMR_SPIM_ALP | PnMR_DDDF_ARGB,
		.edf = PnDDCR4_EDF_NONE,
	}, {
		.fourcc = DRM_FORMAT_XRGB1555,
		.bpp = 16,
		.planes = 1,
		.pnmr = PnMR_SPIM_ALP | PnMR_DDDF_ARGB,
		.edf = PnDDCR4_EDF_NONE,
	}, {
		.fourcc = DRM_FORMAT_XRGB8888,
		.bpp = 32,
		.planes = 1,
		.pnmr = PnMR_SPIM_TP | PnMR_DDDF_16BPP,
		.edf = PnDDCR4_EDF_RGB888,
	}, {
		.fourcc = DRM_FORMAT_ARGB8888,
		.bpp = 32,
		.planes = 1,
		.pnmr = PnMR_SPIM_ALP | PnMR_DDDF_16BPP,
		.edf = PnDDCR4_EDF_ARGB8888,
	}, {
		.fourcc = DRM_FORMAT_UYVY,
		.bpp = 16,
		.planes = 1,
		.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
		.edf = PnDDCR4_EDF_NONE,
	}, {
		.fourcc = DRM_FORMAT_YUYV,
		.bpp = 16,
		.planes = 1,
		.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
		.edf = PnDDCR4_EDF_NONE,
	}, {
		.fourcc = DRM_FORMAT_NV12,
		.bpp = 12,
		.planes = 2,
		.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
		.edf = PnDDCR4_EDF_NONE,
	}, {
		.fourcc = DRM_FORMAT_NV21,
		.bpp = 12,
		.planes = 2,
		.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
		.edf = PnDDCR4_EDF_NONE,
	}, {
		/* In YUV 4:2:2, only NV16 is supported (NV61 isn't) */
		.fourcc = DRM_FORMAT_NV16,
		.bpp = 16,
		.planes = 2,
		.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
		.edf = PnDDCR4_EDF_NONE,
	},
};

const struct rcar_du_format_info *rcar_du_format_info(u32 fourcc)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(rcar_du_format_infos); ++i) {
		if (rcar_du_format_infos[i].fourcc == fourcc)
			return &rcar_du_format_infos[i];
	}

	return NULL;
}

/* -----------------------------------------------------------------------------
 * Frame buffer
 */

int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev,
			struct drm_mode_create_dumb *args)
{
	struct rcar_du_device *rcdu = dev->dev_private;
	unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
	unsigned int align;

	/* The R8A7779 DU requires a 16-pixel pitch alignment as documented,
	 * but the R8A7790 DU seems to require a 128-byte pitch alignment.
	 */
	if (rcar_du_needs(rcdu, RCAR_DU_QUIRK_ALIGN_128B))
		align = 128;
	else
		align = 16 * args->bpp / 8;

	args->pitch = roundup(min_pitch, align);

	return drm_gem_cma_dumb_create_internal(file, dev, args);
}
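
/*
 * Worked example: a 1366-pixel-wide RGB565 dumb buffer (bpp = 16) has a
 * minimum pitch of 2732 bytes. Without the quirk the pitch is rounded up to a
 * multiple of 32 bytes (16 pixels), giving 2752; with the 128-byte alignment
 * quirk it becomes 2816.
 */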

static struct drm_framebuffer *
rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
		  const struct drm_mode_fb_cmd2 *mode_cmd)
{
	struct rcar_du_device *rcdu = dev->dev_private;
	const struct rcar_du_format_info *format;
	unsigned int max_pitch;
	unsigned int align;
	unsigned int bpp;

	format = rcar_du_format_info(mode_cmd->pixel_format);
	if (format == NULL) {
		dev_dbg(dev->dev, "unsupported pixel format %08x\n",
			mode_cmd->pixel_format);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * The pitch and alignment constraints are expressed in pixels on the
	 * hardware side and in bytes in the DRM API.
	 */
	bpp = format->planes == 2 ? 1 : format->bpp / 8;
	max_pitch = 4096 * bpp;

	if (rcar_du_needs(rcdu, RCAR_DU_QUIRK_ALIGN_128B))
		align = 128;
	else
		align = 16 * bpp;

	if (mode_cmd->pitches[0] & (align - 1) ||
	    mode_cmd->pitches[0] >= max_pitch) {
		dev_dbg(dev->dev, "invalid pitch value %u\n",
			mode_cmd->pitches[0]);
		return ERR_PTR(-EINVAL);
	}

	if (format->planes == 2) {
		if (mode_cmd->pitches[1] != mode_cmd->pitches[0]) {
			dev_dbg(dev->dev,
				"luma and chroma pitches do not match\n");
			return ERR_PTR(-EINVAL);
		}
	}

	return drm_fb_cma_create(dev, file_priv, mode_cmd);
}
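
/*
 * Worked example: for a two-plane format such as NV16, bpp evaluates to 1
 * (the luma plane stores one byte per pixel), so max_pitch is 4096 bytes and,
 * without the 128-byte quirk, the pitch must be a multiple of 16 bytes
 * (16 pixels).
 */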

static void rcar_du_output_poll_changed(struct drm_device *dev)
{
	struct rcar_du_device *rcdu = dev->dev_private;

	drm_fbdev_cma_hotplug_event(rcdu->fbdev);
}

/* -----------------------------------------------------------------------------
 * Atomic Check and Update
 */

/*
 * Atomic hardware plane allocator
 *
 * The hardware plane allocator is solely based on the atomic plane states
 * without keeping any external state to avoid races between .atomic_check()
 * and .atomic_commit().
 *
 * The core idea is to avoid a free planes bitmask that would need to be shared
 * between the check and commit handlers; instead, the knowledge of which
 * hardware plane(s) are allocated is kept in each KMS plane state. The
 * allocator loops over all plane states to compute the free planes bitmask,
 * allocates hardware planes based on that bitmask, and stores the result back
 * in the plane states.
 *
 * For this to work we need to access the current state of planes not touched
 * by the atomic update. To ensure that it won't be modified, we need to lock
 * all planes using drm_atomic_get_plane_state(). This effectively serializes
 * atomic updates from .atomic_check() up to completion (when swapping the
 * states if the check step has succeeded) or rollback (when freeing the states
 * if the check step has failed).
 *
 * Allocation is performed in the .atomic_check() handler and applied
 * automatically when the core swaps the old and new states.
 */

static bool rcar_du_plane_needs_realloc(struct rcar_du_plane *plane,
					struct rcar_du_plane_state *new_state)
{
	struct rcar_du_plane_state *cur_state;

	cur_state = to_rcar_plane_state(plane->plane.state);

	/* Lowering the number of planes doesn't strictly require reallocation
	 * as the extra hardware plane will be freed when committing, but doing
	 * so could lead to more fragmentation.
	 */
	if (!cur_state->format ||
	    cur_state->format->planes != new_state->format->planes)
		return true;

	/* Reallocate hardware planes if the source has changed. */
	if (cur_state->source != new_state->source)
		return true;

	return false;
}

static unsigned int rcar_du_plane_hwmask(struct rcar_du_plane_state *state)
{
	unsigned int mask;

	if (state->hwindex == -1)
		return 0;

	mask = 1 << state->hwindex;
	if (state->format->planes == 2)
		mask |= 1 << ((state->hwindex + 1) % 8);

	return mask;
}
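
/*
 * Worked example: a single-plane format at hwindex 3 yields a mask of 0x08,
 * while a two-plane format at hwindex 7 occupies hardware planes 7 and 0
 * (mask 0x81), the second plane wrapping around within the group's eight
 * hardware planes.
 */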

/*
 * The R8A7790 DU can source frames directly from the VSP1 devices VSPD0 and
 * VSPD1. VSPD0 feeds DU0/1 plane 0, and VSPD1 feeds either DU2 plane 0 or
 * DU0/1 plane 1.
 *
 * Allocate the correct fixed plane when sourcing frames from VSPD0 or VSPD1,
 * and allocate planes in reverse index order otherwise to ensure maximum
 * availability of planes 0 and 1.
 *
 * The caller is responsible for ensuring that the requested source is
 * compatible with the DU revision.
 */
static int rcar_du_plane_hwalloc(struct rcar_du_plane *plane,
				 struct rcar_du_plane_state *state,
				 unsigned int free)
{
	unsigned int num_planes = state->format->planes;
	int fixed = -1;
	int i;

	if (state->source == RCAR_DU_PLANE_VSPD0) {
		/* VSPD0 feeds plane 0 on DU0/1. */
		if (plane->group->index != 0)
			return -EINVAL;

		fixed = 0;
	} else if (state->source == RCAR_DU_PLANE_VSPD1) {
		/* VSPD1 feeds plane 1 on DU0/1 or plane 0 on DU2. */
		fixed = plane->group->index == 0 ? 1 : 0;
	}

	if (fixed >= 0)
		return free & (1 << fixed) ? fixed : -EBUSY;

	for (i = RCAR_DU_NUM_HW_PLANES - 1; i >= 0; --i) {
		if (!(free & (1 << i)))
			continue;

		if (num_planes == 1 || free & (1 << ((i + 1) % 8)))
			break;
	}

	return i < 0 ? -EBUSY : i;
}
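
/*
 * Worked example: with no VSP source and all eight hardware planes free, a
 * single-plane format is given plane 7 (highest index first), preserving
 * planes 0 and 1 for the fixed VSPD0/VSPD1 routes; a two-plane format also
 * requires plane (i + 1) % 8 to be free.
 */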

static int rcar_du_atomic_check(struct drm_device *dev,
				struct drm_atomic_state *state)
{
	struct rcar_du_device *rcdu = dev->dev_private;
	unsigned int group_freed_planes[RCAR_DU_MAX_GROUPS] = { 0, };
	unsigned int group_free_planes[RCAR_DU_MAX_GROUPS] = { 0, };
	bool needs_realloc = false;
	unsigned int groups = 0;
	unsigned int i;
	int ret;

	ret = drm_atomic_helper_check(dev, state);
	if (ret < 0)
		return ret;

	/* Check if hardware planes need to be reallocated. */
	for (i = 0; i < dev->mode_config.num_total_plane; ++i) {
		struct rcar_du_plane_state *plane_state;
		struct rcar_du_plane *plane;
		unsigned int index;

		if (!state->planes[i])
			continue;

		plane = to_rcar_plane(state->planes[i]);
		plane_state = to_rcar_plane_state(state->plane_states[i]);

		dev_dbg(rcdu->dev, "%s: checking plane (%u,%u)\n", __func__,
			plane->group->index, plane - plane->group->planes);

		/* If the plane is being disabled we don't need to go through
		 * the full reallocation procedure. Just mark the hardware
		 * plane(s) as freed.
		 */
		if (!plane_state->format) {
			dev_dbg(rcdu->dev, "%s: plane is being disabled\n",
				__func__);
			index = plane - plane->group->planes;
			group_freed_planes[plane->group->index] |= 1 << index;
			plane_state->hwindex = -1;
			continue;
		}

		/* If the plane needs to be reallocated mark it as such, and
		 * mark the hardware plane(s) as free.
		 */
		if (rcar_du_plane_needs_realloc(plane, plane_state)) {
			dev_dbg(rcdu->dev, "%s: plane needs reallocation\n",
				__func__);
			groups |= 1 << plane->group->index;
			needs_realloc = true;

			index = plane - plane->group->planes;
			group_freed_planes[plane->group->index] |= 1 << index;
			plane_state->hwindex = -1;
		}
	}

	if (!needs_realloc)
		return 0;

	/* Grab all plane states for the groups that need reallocation to ensure
	 * locking and avoid racy updates. This serializes the update operation,
	 * but there's not much we can do about it as that's the hardware
	 * design.
	 *
	 * Compute the used planes mask for each group at the same time to avoid
	 * looping over the planes separately later.
	 */
	while (groups) {
		unsigned int index = ffs(groups) - 1;
		struct rcar_du_group *group = &rcdu->groups[index];
		unsigned int used_planes = 0;

		dev_dbg(rcdu->dev, "%s: finding free planes for group %u\n",
			__func__, index);

		for (i = 0; i < group->num_planes; ++i) {
			struct rcar_du_plane *plane = &group->planes[i];
			struct rcar_du_plane_state *plane_state;
			struct drm_plane_state *s;

			s = drm_atomic_get_plane_state(state, &plane->plane);
			if (IS_ERR(s))
				return PTR_ERR(s);

			/* If the plane has been freed in the above loop its
			 * hardware planes must not be added to the used planes
			 * bitmask. However, the current state doesn't reflect
			 * the free state yet, as we've modified the new state
			 * above. Use the local freed planes list to check for
			 * that condition instead.
			 */
			if (group_freed_planes[index] & (1 << i)) {
				dev_dbg(rcdu->dev,
					"%s: plane (%u,%u) has been freed, skipping\n",
					__func__, plane->group->index,
					plane - plane->group->planes);
				continue;
			}

			plane_state = to_rcar_plane_state(plane->plane.state);
			used_planes |= rcar_du_plane_hwmask(plane_state);

			dev_dbg(rcdu->dev,
				"%s: plane (%u,%u) uses %u hwplanes (index %d)\n",
				__func__, plane->group->index,
				plane - plane->group->planes,
				plane_state->format ?
				plane_state->format->planes : 0,
				plane_state->hwindex);
		}

		group_free_planes[index] = 0xff & ~used_planes;
		groups &= ~(1 << index);

		dev_dbg(rcdu->dev, "%s: group %u free planes mask 0x%02x\n",
			__func__, index, group_free_planes[index]);
	}

	/* Reallocate hardware planes for each plane that needs it. */
	for (i = 0; i < dev->mode_config.num_total_plane; ++i) {
		struct rcar_du_plane_state *plane_state;
		struct rcar_du_plane *plane;
		unsigned int crtc_planes;
		unsigned int free;
		int idx;

		if (!state->planes[i])
			continue;

		plane = to_rcar_plane(state->planes[i]);
		plane_state = to_rcar_plane_state(state->plane_states[i]);

		dev_dbg(rcdu->dev, "%s: allocating plane (%u,%u)\n", __func__,
			plane->group->index, plane - plane->group->planes);

		/* Skip planes that are being disabled or don't need to be
		 * reallocated.
		 */
		if (!plane_state->format ||
		    !rcar_du_plane_needs_realloc(plane, plane_state))
			continue;

		/* Try to allocate the plane from the free planes currently
		 * associated with the target CRTC to avoid restarting the CRTC
		 * group and thus minimize flicker. If it fails fall back to
		 * allocating from all free planes.
		 */
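		/*
		 * For example, with the default dptsr_planes value of 0xf0 set
		 * at init time for two-CRTC groups, an even-indexed CRTC first
		 * tries hardware planes 0-3 (~0xf0) and an odd-indexed CRTC
		 * planes 4-7, before falling back to the full free mask below.
		 */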
		crtc_planes = to_rcar_crtc(plane_state->state.crtc)->index % 2
			    ? plane->group->dptsr_planes
			    : ~plane->group->dptsr_planes;
		free = group_free_planes[plane->group->index];

		idx = rcar_du_plane_hwalloc(plane, plane_state,
					    free & crtc_planes);
		if (idx < 0)
			idx = rcar_du_plane_hwalloc(plane, plane_state,
						    free);
		if (idx < 0) {
			dev_dbg(rcdu->dev, "%s: no available hardware plane\n",
				__func__);
			return idx;
		}

		dev_dbg(rcdu->dev, "%s: allocated %u hwplanes (index %u)\n",
			__func__, plane_state->format->planes, idx);

		plane_state->hwindex = idx;

		group_free_planes[plane->group->index] &=
			~rcar_du_plane_hwmask(plane_state);

		dev_dbg(rcdu->dev, "%s: group %u free planes mask 0x%02x\n",
			__func__, plane->group->index,
			group_free_planes[plane->group->index]);
	}

	return 0;
}

struct rcar_du_commit {
	struct work_struct work;
	struct drm_device *dev;
	struct drm_atomic_state *state;
	u32 crtcs;
};

static void rcar_du_atomic_complete(struct rcar_du_commit *commit)
{
	struct drm_device *dev = commit->dev;
	struct rcar_du_device *rcdu = dev->dev_private;
	struct drm_atomic_state *old_state = commit->state;

	/* Apply the atomic update. */
	drm_atomic_helper_commit_modeset_disables(dev, old_state);
	drm_atomic_helper_commit_modeset_enables(dev, old_state);
	drm_atomic_helper_commit_planes(dev, old_state, true);

	drm_atomic_helper_wait_for_vblanks(dev, old_state);

	drm_atomic_helper_cleanup_planes(dev, old_state);

	drm_atomic_state_free(old_state);

	/* Complete the commit, wake up any waiter. */
	spin_lock(&rcdu->commit.wait.lock);
	rcdu->commit.pending &= ~commit->crtcs;
	wake_up_all_locked(&rcdu->commit.wait);
	spin_unlock(&rcdu->commit.wait.lock);

	kfree(commit);
}

static void rcar_du_atomic_work(struct work_struct *work)
{
	struct rcar_du_commit *commit =
		container_of(work, struct rcar_du_commit, work);

	rcar_du_atomic_complete(commit);
}

static int rcar_du_atomic_commit(struct drm_device *dev,
				 struct drm_atomic_state *state, bool async)
{
	struct rcar_du_device *rcdu = dev->dev_private;
	struct rcar_du_commit *commit;
	unsigned int i;
	int ret;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	if (ret)
		return ret;

	/* Allocate the commit object. */
	commit = kzalloc(sizeof(*commit), GFP_KERNEL);
	if (commit == NULL) {
		ret = -ENOMEM;
		goto error;
	}

	INIT_WORK(&commit->work, rcar_du_atomic_work);
	commit->dev = dev;
	commit->state = state;

	/* Wait until all affected CRTCs have completed previous commits and
	 * mark them as pending.
	 */
	for (i = 0; i < dev->mode_config.num_crtc; ++i) {
		if (state->crtcs[i])
			commit->crtcs |= 1 << drm_crtc_index(state->crtcs[i]);
	}

	spin_lock(&rcdu->commit.wait.lock);
	ret = wait_event_interruptible_locked(rcdu->commit.wait,
			!(rcdu->commit.pending & commit->crtcs));
	if (ret == 0)
		rcdu->commit.pending |= commit->crtcs;
	spin_unlock(&rcdu->commit.wait.lock);

	if (ret) {
		kfree(commit);
		goto error;
	}

	/* Swap the state, this is the point of no return. */
	drm_atomic_helper_swap_state(dev, state);

	if (async)
		schedule_work(&commit->work);
	else
		rcar_du_atomic_complete(commit);

	return 0;

error:
	drm_atomic_helper_cleanup_planes(dev, state);
	return ret;
}

/* -----------------------------------------------------------------------------
 * Initialization
 */

static const struct drm_mode_config_funcs rcar_du_mode_config_funcs = {
	.fb_create = rcar_du_fb_create,
	.output_poll_changed = rcar_du_output_poll_changed,
	.atomic_check = rcar_du_atomic_check,
	.atomic_commit = rcar_du_atomic_commit,
};

static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
				     enum rcar_du_output output,
				     struct of_endpoint *ep)
{
	static const struct {
		const char *compatible;
		enum rcar_du_encoder_type type;
	} encoders[] = {
		{ "adi,adv7123", RCAR_DU_ENCODER_VGA },
		{ "adi,adv7511w", RCAR_DU_ENCODER_HDMI },
		{ "thine,thc63lvdm83d", RCAR_DU_ENCODER_LVDS },
	};

	enum rcar_du_encoder_type enc_type = RCAR_DU_ENCODER_NONE;
	struct device_node *connector = NULL;
	struct device_node *encoder = NULL;
	struct device_node *ep_node = NULL;
	struct device_node *entity_ep_node;
	struct device_node *entity;
	int ret;

	/*
	 * Locate the connected entity and infer its type from the number of
	 * endpoints.
	 */
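	/*
	 * The DT graph layout assumed below: the DU endpoint's remote entity
	 * is either an external encoder, in which case it has at least one
	 * endpoint besides the one facing the DU and that endpoint leads to
	 * the connector, or, if no such endpoint exists, the connector itself.
	 */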
	entity = of_graph_get_remote_port_parent(ep->local_node);
	if (!entity) {
		dev_dbg(rcdu->dev, "unconnected endpoint %s, skipping\n",
			ep->local_node->full_name);
		return -ENODEV;
	}

	entity_ep_node = of_parse_phandle(ep->local_node, "remote-endpoint", 0);

	for_each_endpoint_of_node(entity, ep_node) {
		if (ep_node == entity_ep_node)
			continue;

		/*
		 * We've found one endpoint other than the input, this must
		 * be an encoder. Locate the connector.
		 */
		encoder = entity;
		connector = of_graph_get_remote_port_parent(ep_node);
		of_node_put(ep_node);

		if (!connector) {
			dev_warn(rcdu->dev,
				 "no connector for encoder %s, skipping\n",
				 encoder->full_name);
			of_node_put(entity_ep_node);
			of_node_put(encoder);
			return -ENODEV;
		}

		break;
	}

	of_node_put(entity_ep_node);

	if (encoder) {
		/*
		 * If an encoder has been found, get its type based on its
		 * compatible string.
		 */
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(encoders); ++i) {
			if (of_device_is_compatible(encoder,
						    encoders[i].compatible)) {
				enc_type = encoders[i].type;
				break;
			}
		}

		if (i == ARRAY_SIZE(encoders)) {
			dev_warn(rcdu->dev,
				 "unknown encoder type for %s, skipping\n",
				 encoder->full_name);
			of_node_put(encoder);
			of_node_put(connector);
			return -EINVAL;
		}
	} else {
		/*
		 * If no encoder has been found the entity must be the
		 * connector.
		 */
		connector = entity;
	}

	ret = rcar_du_encoder_init(rcdu, enc_type, output, encoder, connector);
	of_node_put(encoder);
	of_node_put(connector);

	if (ret && ret != -EPROBE_DEFER)
		dev_warn(rcdu->dev,
			 "failed to initialize encoder %s (%d), skipping\n",
			 encoder->full_name, ret);

	return ret;
}

static int rcar_du_encoders_init(struct rcar_du_device *rcdu)
{
	struct device_node *np = rcdu->dev->of_node;
	struct device_node *ep_node;
	unsigned int num_encoders = 0;

	/*
	 * Iterate over the endpoints and create one encoder for each output
	 * pipeline.
	 */
	for_each_endpoint_of_node(np, ep_node) {
		enum rcar_du_output output;
		struct of_endpoint ep;
		unsigned int i;
		int ret;

		ret = of_graph_parse_endpoint(ep_node, &ep);
		if (ret < 0) {
			of_node_put(ep_node);
			return ret;
		}

		/* Find the output route corresponding to the port number. */
		for (i = 0; i < RCAR_DU_OUTPUT_MAX; ++i) {
			if (rcdu->info->routes[i].possible_crtcs &&
			    rcdu->info->routes[i].port == ep.port) {
				output = i;
				break;
			}
		}

		if (i == RCAR_DU_OUTPUT_MAX) {
			dev_warn(rcdu->dev,
				 "port %u references nonexistent output, skipping\n",
				 ep.port);
			continue;
		}

		/* Process the output pipeline. */
		ret = rcar_du_encoders_init_one(rcdu, output, &ep);
		if (ret < 0) {
			if (ret == -EPROBE_DEFER) {
				of_node_put(ep_node);
				return ret;
			}

			continue;
		}

		num_encoders++;
	}

	return num_encoders;
}

static int rcar_du_properties_init(struct rcar_du_device *rcdu)
{
	rcdu->props.alpha =
		drm_property_create_range(rcdu->ddev, 0, "alpha", 0, 255);
	if (rcdu->props.alpha == NULL)
		return -ENOMEM;

	/* The color key is expressed as an RGB888 triplet stored in a 32-bit
	 * integer in XRGB8888 format. Bit 24 is used as a flag to disable (0)
	 * or enable source color keying (1).
	 */
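	/* For example, per the encoding above, 0x01ff0000 enables keying on
	 * pure red and 0 disables keying entirely.
	 */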
	rcdu->props.colorkey =
		drm_property_create_range(rcdu->ddev, 0, "colorkey",
					  0, 0x01ffffff);
	if (rcdu->props.colorkey == NULL)
		return -ENOMEM;

	rcdu->props.zpos =
		drm_property_create_range(rcdu->ddev, 0, "zpos", 1, 7);
	if (rcdu->props.zpos == NULL)
		return -ENOMEM;

	return 0;
}

int rcar_du_modeset_init(struct rcar_du_device *rcdu)
{
	static const unsigned int mmio_offsets[] = {
		DU0_REG_OFFSET, DU2_REG_OFFSET
	};

	struct drm_device *dev = rcdu->ddev;
	struct drm_encoder *encoder;
	struct drm_fbdev_cma *fbdev;
	unsigned int num_encoders;
	unsigned int num_groups;
	unsigned int i;
	int ret;

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	dev->mode_config.max_width = 4095;
	dev->mode_config.max_height = 2047;
	dev->mode_config.funcs = &rcar_du_mode_config_funcs;

	rcdu->num_crtcs = rcdu->info->num_crtcs;

	ret = rcar_du_properties_init(rcdu);
	if (ret < 0)
		return ret;

	/* Initialize the groups. */
	num_groups = DIV_ROUND_UP(rcdu->num_crtcs, 2);

	for (i = 0; i < num_groups; ++i) {
		struct rcar_du_group *rgrp = &rcdu->groups[i];

		mutex_init(&rgrp->lock);

		rgrp->dev = rcdu;
		rgrp->mmio_offset = mmio_offsets[i];
		rgrp->index = i;
		rgrp->num_crtcs = min(rcdu->num_crtcs - 2 * i, 2U);

		/* If we have more than one CRTC in this group, pre-associate
		 * planes 0-3 with CRTC 0 and planes 4-7 with CRTC 1 to
		 * minimize flicker occurring when the association is changed.
		 */
		rgrp->dptsr_planes = rgrp->num_crtcs > 1 ? 0xf0 : 0;

		ret = rcar_du_planes_init(rgrp);
		if (ret < 0)
			return ret;
	}

	/* Create the CRTCs. */
	for (i = 0; i < rcdu->num_crtcs; ++i) {
		struct rcar_du_group *rgrp = &rcdu->groups[i / 2];

		ret = rcar_du_crtc_create(rgrp, i);
		if (ret < 0)
			return ret;
	}

	/* Initialize the encoders. */
	ret = rcar_du_lvdsenc_init(rcdu);
	if (ret < 0)
		return ret;

	ret = rcar_du_encoders_init(rcdu);
	if (ret < 0)
		return ret;

	if (ret == 0) {
		dev_err(rcdu->dev, "error: no encoder could be initialized\n");
		return -EINVAL;
	}

	num_encoders = ret;

	/* Set the possible CRTCs and possible clones. There's always at least
	 * one way for all encoders to clone each other, so set all bits in the
	 * possible clones field.
	 */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
		const struct rcar_du_output_routing *route =
			&rcdu->info->routes[renc->output];

		encoder->possible_crtcs = route->possible_crtcs;
		encoder->possible_clones = (1 << num_encoders) - 1;
	}

	drm_mode_config_reset(dev);

	drm_kms_helper_poll_init(dev);

	if (dev->mode_config.num_connector) {
		fbdev = drm_fbdev_cma_init(dev, 32, dev->mode_config.num_crtc,
					   dev->mode_config.num_connector);
		if (IS_ERR(fbdev))
			return PTR_ERR(fbdev);

		rcdu->fbdev = fbdev;
	} else {
		dev_info(rcdu->dev,
			 "no connector found, disabling fbdev emulation\n");
	}

	return 0;
}