drm: rcar-du: Fix crash with groups that have less than 9 planes
[deliverable/linux.git] / drivers / gpu / drm / rcar-du / rcar_du_kms.c
1 /*
2 * rcar_du_kms.c -- R-Car Display Unit Mode Setting
3 *
4 * Copyright (C) 2013-2014 Renesas Electronics Corporation
5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14 #include <drm/drmP.h>
15 #include <drm/drm_atomic.h>
16 #include <drm/drm_atomic_helper.h>
17 #include <drm/drm_crtc.h>
18 #include <drm/drm_crtc_helper.h>
19 #include <drm/drm_fb_cma_helper.h>
20 #include <drm/drm_gem_cma_helper.h>
21
22 #include <linux/of_graph.h>
23 #include <linux/wait.h>
24
25 #include "rcar_du_crtc.h"
26 #include "rcar_du_drv.h"
27 #include "rcar_du_encoder.h"
28 #include "rcar_du_kms.h"
29 #include "rcar_du_lvdsenc.h"
30 #include "rcar_du_regs.h"
31
32 /* -----------------------------------------------------------------------------
33 * Format helpers
34 */
35
/*
 * Table of pixel formats supported by the DU, looked up by FourCC through
 * rcar_du_format_info(). For each format:
 *   .bpp    - bits per pixel (average for multi-planar YUV formats)
 *   .planes - number of memory planes (2 for NV12/NV21/NV16)
 *   .pnmr   - PnMR register data format / transparency bits
 *   .edf    - PnDDCR4 extended data format selection
 */
static const struct rcar_du_format_info rcar_du_format_infos[] = {
	{
		.fourcc = DRM_FORMAT_RGB565,
		.bpp = 16,
		.planes = 1,
		.pnmr = PnMR_SPIM_TP | PnMR_DDDF_16BPP,
		.edf = PnDDCR4_EDF_NONE,
	}, {
		.fourcc = DRM_FORMAT_ARGB1555,
		.bpp = 16,
		.planes = 1,
		.pnmr = PnMR_SPIM_ALP | PnMR_DDDF_ARGB,
		.edf = PnDDCR4_EDF_NONE,
	}, {
		.fourcc = DRM_FORMAT_XRGB1555,
		.bpp = 16,
		.planes = 1,
		.pnmr = PnMR_SPIM_ALP | PnMR_DDDF_ARGB,
		.edf = PnDDCR4_EDF_NONE,
	}, {
		.fourcc = DRM_FORMAT_XRGB8888,
		.bpp = 32,
		.planes = 1,
		.pnmr = PnMR_SPIM_TP | PnMR_DDDF_16BPP,
		.edf = PnDDCR4_EDF_RGB888,
	}, {
		.fourcc = DRM_FORMAT_ARGB8888,
		.bpp = 32,
		.planes = 1,
		.pnmr = PnMR_SPIM_ALP | PnMR_DDDF_16BPP,
		.edf = PnDDCR4_EDF_ARGB8888,
	}, {
		.fourcc = DRM_FORMAT_UYVY,
		.bpp = 16,
		.planes = 1,
		.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
		.edf = PnDDCR4_EDF_NONE,
	}, {
		.fourcc = DRM_FORMAT_YUYV,
		.bpp = 16,
		.planes = 1,
		.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
		.edf = PnDDCR4_EDF_NONE,
	}, {
		.fourcc = DRM_FORMAT_NV12,
		.bpp = 12,
		.planes = 2,
		.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
		.edf = PnDDCR4_EDF_NONE,
	}, {
		.fourcc = DRM_FORMAT_NV21,
		.bpp = 12,
		.planes = 2,
		.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
		.edf = PnDDCR4_EDF_NONE,
	}, {
		/* In YUV 4:2:2, only NV16 is supported (NV61 isn't) */
		.fourcc = DRM_FORMAT_NV16,
		.bpp = 16,
		.planes = 2,
		.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
		.edf = PnDDCR4_EDF_NONE,
	},
};
100
101 const struct rcar_du_format_info *rcar_du_format_info(u32 fourcc)
102 {
103 unsigned int i;
104
105 for (i = 0; i < ARRAY_SIZE(rcar_du_format_infos); ++i) {
106 if (rcar_du_format_infos[i].fourcc == fourcc)
107 return &rcar_du_format_infos[i];
108 }
109
110 return NULL;
111 }
112
113 /* -----------------------------------------------------------------------------
114 * Frame buffer
115 */
116
117 int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev,
118 struct drm_mode_create_dumb *args)
119 {
120 struct rcar_du_device *rcdu = dev->dev_private;
121 unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
122 unsigned int align;
123
124 /* The R8A7779 DU requires a 16 pixels pitch alignment as documented,
125 * but the R8A7790 DU seems to require a 128 bytes pitch alignment.
126 */
127 if (rcar_du_needs(rcdu, RCAR_DU_QUIRK_ALIGN_128B))
128 align = 128;
129 else
130 align = 16 * args->bpp / 8;
131
132 args->pitch = roundup(min_pitch, align);
133
134 return drm_gem_cma_dumb_create_internal(file, dev, args);
135 }
136
137 static struct drm_framebuffer *
138 rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
139 struct drm_mode_fb_cmd2 *mode_cmd)
140 {
141 struct rcar_du_device *rcdu = dev->dev_private;
142 const struct rcar_du_format_info *format;
143 unsigned int max_pitch;
144 unsigned int align;
145 unsigned int bpp;
146
147 format = rcar_du_format_info(mode_cmd->pixel_format);
148 if (format == NULL) {
149 dev_dbg(dev->dev, "unsupported pixel format %08x\n",
150 mode_cmd->pixel_format);
151 return ERR_PTR(-EINVAL);
152 }
153
154 /*
155 * The pitch and alignment constraints are expressed in pixels on the
156 * hardware side and in bytes in the DRM API.
157 */
158 bpp = format->planes == 2 ? 1 : format->bpp / 8;
159 max_pitch = 4096 * bpp;
160
161 if (rcar_du_needs(rcdu, RCAR_DU_QUIRK_ALIGN_128B))
162 align = 128;
163 else
164 align = 16 * bpp;
165
166 if (mode_cmd->pitches[0] & (align - 1) ||
167 mode_cmd->pitches[0] >= max_pitch) {
168 dev_dbg(dev->dev, "invalid pitch value %u\n",
169 mode_cmd->pitches[0]);
170 return ERR_PTR(-EINVAL);
171 }
172
173 if (format->planes == 2) {
174 if (mode_cmd->pitches[1] != mode_cmd->pitches[0]) {
175 dev_dbg(dev->dev,
176 "luma and chroma pitches do not match\n");
177 return ERR_PTR(-EINVAL);
178 }
179 }
180
181 return drm_fb_cma_create(dev, file_priv, mode_cmd);
182 }
183
184 static void rcar_du_output_poll_changed(struct drm_device *dev)
185 {
186 struct rcar_du_device *rcdu = dev->dev_private;
187
188 drm_fbdev_cma_hotplug_event(rcdu->fbdev);
189 }
190
191 /* -----------------------------------------------------------------------------
192 * Atomic Check and Update
193 */
194
195 /*
196 * Atomic hardware plane allocator
197 *
198 * The hardware plane allocator is solely based on the atomic plane states
199 * without keeping any external state to avoid races between .atomic_check()
200 * and .atomic_commit().
201 *
202 * The core idea is to avoid using a free planes bitmask that would need to be
203 * shared between check and commit handlers with a collective knowledge based on
204 * the allocated hardware plane(s) for each KMS plane. The allocator then loops
205 * over all plane states to compute the free planes bitmask, allocates hardware
206 * planes based on that bitmask, and stores the result back in the plane states.
207 *
208 * For this to work we need to access the current state of planes not touched by
209 * the atomic update. To ensure that it won't be modified, we need to lock all
210 * planes using drm_atomic_get_plane_state(). This effectively serializes atomic
211 * updates from .atomic_check() up to completion (when swapping the states if
212 * the check step has succeeded) or rollback (when freeing the states if the
213 * check step has failed).
214 *
215 * Allocation is performed in the .atomic_check() handler and applied
216 * automatically when the core swaps the old and new states.
217 */
218
219 static bool rcar_du_plane_needs_realloc(struct rcar_du_plane *plane,
220 struct rcar_du_plane_state *state)
221 {
222 const struct rcar_du_format_info *cur_format;
223
224 cur_format = to_rcar_plane_state(plane->plane.state)->format;
225
226 /* Lowering the number of planes doesn't strictly require reallocation
227 * as the extra hardware plane will be freed when committing, but doing
228 * so could lead to more fragmentation.
229 */
230 return !cur_format || cur_format->planes != state->format->planes;
231 }
232
/*
 * rcar_du_plane_hwmask - Compute the bitmask of hardware planes used by a
 * plane state
 *
 * Formats with two memory planes occupy two consecutive hardware planes; the
 * second index wraps around modulo 8 (the number of hardware planes per
 * group), matching the pairing used by rcar_du_plane_hwalloc().
 */
static unsigned int rcar_du_plane_hwmask(struct rcar_du_plane_state *state)
{
	unsigned int mask;

	/* hwindex == -1 means no hardware plane is allocated. */
	if (state->hwindex == -1)
		return 0;

	mask = 1 << state->hwindex;
	if (state->format->planes == 2)
		mask |= 1 << ((state->hwindex + 1) % 8);

	return mask;
}
246
247 static int rcar_du_plane_hwalloc(unsigned int num_planes, unsigned int free)
248 {
249 unsigned int i;
250
251 for (i = 0; i < RCAR_DU_NUM_HW_PLANES; ++i) {
252 if (!(free & (1 << i)))
253 continue;
254
255 if (num_planes == 1 || free & (1 << ((i + 1) % 8)))
256 break;
257 }
258
259 return i == RCAR_DU_NUM_HW_PLANES ? -EBUSY : i;
260 }
261
/*
 * rcar_du_atomic_check - .atomic_check handler with hardware plane allocation
 *
 * Run the generic atomic check, then (re)allocate hardware planes for every
 * KMS plane whose format change requires it, as described in the allocator
 * comment block above. Returns 0 on success or a negative error code
 * (-EBUSY when no hardware plane is available).
 */
static int rcar_du_atomic_check(struct drm_device *dev,
				struct drm_atomic_state *state)
{
	struct rcar_du_device *rcdu = dev->dev_private;
	/* Planes freed by this update, per group (KMS plane index bitmask). */
	unsigned int group_freed_planes[RCAR_DU_MAX_GROUPS] = { 0, };
	/* Hardware planes available for allocation, per group. */
	unsigned int group_free_planes[RCAR_DU_MAX_GROUPS] = { 0, };
	bool needs_realloc = false;
	unsigned int groups = 0;
	unsigned int i;
	int ret;

	ret = drm_atomic_helper_check(dev, state);
	if (ret < 0)
		return ret;

	/* Check if hardware planes need to be reallocated. */
	for (i = 0; i < dev->mode_config.num_total_plane; ++i) {
		struct rcar_du_plane_state *plane_state;
		struct rcar_du_plane *plane;
		unsigned int index;

		/* Only planes touched by this update are in the state. */
		if (!state->planes[i])
			continue;

		plane = to_rcar_plane(state->planes[i]);
		plane_state = to_rcar_plane_state(state->plane_states[i]);

		dev_dbg(rcdu->dev, "%s: checking plane (%u,%u)\n", __func__,
			plane->group->index, plane - plane->group->planes);

		/* If the plane is being disabled we don't need to go through
		 * the full reallocation procedure. Just mark the hardware
		 * plane(s) as freed.
		 */
		if (!plane_state->format) {
			dev_dbg(rcdu->dev, "%s: plane is being disabled\n",
				__func__);
			index = plane - plane->group->planes;
			group_freed_planes[plane->group->index] |= 1 << index;
			plane_state->hwindex = -1;
			continue;
		}

		/* If the plane needs to be reallocated mark it as such, and
		 * mark the hardware plane(s) as free.
		 */
		if (rcar_du_plane_needs_realloc(plane, plane_state)) {
			dev_dbg(rcdu->dev, "%s: plane needs reallocation\n",
				__func__);
			groups |= 1 << plane->group->index;
			needs_realloc = true;

			index = plane - plane->group->planes;
			group_freed_planes[plane->group->index] |= 1 << index;
			plane_state->hwindex = -1;
		}
	}

	if (!needs_realloc)
		return 0;

	/* Grab all plane states for the groups that need reallocation to ensure
	 * locking and avoid racy updates. This serializes the update operation,
	 * but there's not much we can do about it as that's the hardware
	 * design.
	 *
	 * Compute the used planes mask for each group at the same time to avoid
	 * looping over the planes separately later.
	 */
	while (groups) {
		unsigned int index = ffs(groups) - 1;
		struct rcar_du_group *group = &rcdu->groups[index];
		unsigned int used_planes = 0;

		dev_dbg(rcdu->dev, "%s: finding free planes for group %u\n",
			__func__, index);

		for (i = 0; i < group->num_planes; ++i) {
			struct rcar_du_plane *plane = &group->planes[i];
			struct rcar_du_plane_state *plane_state;
			struct drm_plane_state *s;

			/* Lock the plane's state (see allocator comment). */
			s = drm_atomic_get_plane_state(state, &plane->plane);
			if (IS_ERR(s))
				return PTR_ERR(s);

			/* If the plane has been freed in the above loop its
			 * hardware planes must not be added to the used planes
			 * bitmask. However, the current state doesn't reflect
			 * the free state yet, as we've modified the new state
			 * above. Use the local freed planes list to check for
			 * that condition instead.
			 */
			if (group_freed_planes[index] & (1 << i)) {
				dev_dbg(rcdu->dev,
					"%s: plane (%u,%u) has been freed, skipping\n",
					__func__, plane->group->index,
					plane - plane->group->planes);
				continue;
			}

			plane_state = to_rcar_plane_state(plane->plane.state);
			used_planes |= rcar_du_plane_hwmask(plane_state);

			dev_dbg(rcdu->dev,
				"%s: plane (%u,%u) uses %u hwplanes (index %d)\n",
				__func__, plane->group->index,
				plane - plane->group->planes,
				plane_state->format ?
				plane_state->format->planes : 0,
				plane_state->hwindex);
		}

		group_free_planes[index] = 0xff & ~used_planes;
		groups &= ~(1 << index);

		dev_dbg(rcdu->dev, "%s: group %u free planes mask 0x%02x\n",
			__func__, index, group_free_planes[index]);
	}

	/* Reallocate hardware planes for each plane that needs it. */
	for (i = 0; i < dev->mode_config.num_total_plane; ++i) {
		struct rcar_du_plane_state *plane_state;
		struct rcar_du_plane *plane;
		unsigned int crtc_planes;
		unsigned int free;
		int idx;

		if (!state->planes[i])
			continue;

		plane = to_rcar_plane(state->planes[i]);
		plane_state = to_rcar_plane_state(state->plane_states[i]);

		dev_dbg(rcdu->dev, "%s: allocating plane (%u,%u)\n", __func__,
			plane->group->index, plane - plane->group->planes);

		/* Skip planes that are being disabled or don't need to be
		 * reallocated.
		 */
		if (!plane_state->format ||
		    !rcar_du_plane_needs_realloc(plane, plane_state))
			continue;

		/* Try to allocate the plane from the free planes currently
		 * associated with the target CRTC to avoid restarting the CRTC
		 * group and thus minimize flicker. If it fails fall back to
		 * allocating from all free planes.
		 */
		crtc_planes = to_rcar_crtc(plane_state->state.crtc)->index % 2
			    ? plane->group->dptsr_planes
			    : ~plane->group->dptsr_planes;
		free = group_free_planes[plane->group->index];

		idx = rcar_du_plane_hwalloc(plane_state->format->planes,
					    free & crtc_planes);
		if (idx < 0)
			idx = rcar_du_plane_hwalloc(plane_state->format->planes,
						    free);
		if (idx < 0) {
			dev_dbg(rcdu->dev, "%s: no available hardware plane\n",
				__func__);
			return idx;
		}

		dev_dbg(rcdu->dev, "%s: allocated %u hwplanes (index %u)\n",
			__func__, plane_state->format->planes, idx);

		plane_state->hwindex = idx;

		/* Remove the allocated hardware plane(s) from the free mask. */
		group_free_planes[plane->group->index] &=
			~rcar_du_plane_hwmask(plane_state);

		dev_dbg(rcdu->dev, "%s: group %u free planes mask 0x%02x\n",
			__func__, plane->group->index,
			group_free_planes[plane->group->index]);
	}

	return 0;
}
442
/*
 * struct rcar_du_commit - Context for an in-flight atomic commit
 * @work: worker used to complete asynchronous commits
 * @dev: the DRM device
 * @state: the atomic state being applied, freed on completion
 * @crtcs: bitmask of the CRTCs affected by this commit, used to serialize
 *	commits touching the same CRTCs (see rcdu->commit.pending)
 */
struct rcar_du_commit {
	struct work_struct work;
	struct drm_device *dev;
	struct drm_atomic_state *state;
	u32 crtcs;
};
449
/*
 * rcar_du_atomic_complete - Apply a swapped atomic state to the hardware
 *
 * Runs either directly from rcar_du_atomic_commit() (synchronous commit) or
 * from the commit worker (asynchronous commit). Applies the update, waits for
 * vblank, releases the state, and finally clears the affected CRTCs from the
 * pending mask so that waiting commits can proceed. Frees @commit.
 */
static void rcar_du_atomic_complete(struct rcar_du_commit *commit)
{
	struct drm_device *dev = commit->dev;
	struct rcar_du_device *rcdu = dev->dev_private;
	struct drm_atomic_state *old_state = commit->state;

	/* Apply the atomic update. */
	drm_atomic_helper_commit_modeset_disables(dev, old_state);
	drm_atomic_helper_commit_modeset_enables(dev, old_state);
	drm_atomic_helper_commit_planes(dev, old_state);

	/* Wait for the update to land before releasing the old buffers. */
	drm_atomic_helper_wait_for_vblanks(dev, old_state);

	drm_atomic_helper_cleanup_planes(dev, old_state);

	drm_atomic_state_free(old_state);

	/* Complete the commit, wake up any waiter. */
	spin_lock(&rcdu->commit.wait.lock);
	rcdu->commit.pending &= ~commit->crtcs;
	wake_up_all_locked(&rcdu->commit.wait);
	spin_unlock(&rcdu->commit.wait.lock);

	kfree(commit);
}
475
476 static void rcar_du_atomic_work(struct work_struct *work)
477 {
478 struct rcar_du_commit *commit =
479 container_of(work, struct rcar_du_commit, work);
480
481 rcar_du_atomic_complete(commit);
482 }
483
484 static int rcar_du_atomic_commit(struct drm_device *dev,
485 struct drm_atomic_state *state, bool async)
486 {
487 struct rcar_du_device *rcdu = dev->dev_private;
488 struct rcar_du_commit *commit;
489 unsigned int i;
490 int ret;
491
492 ret = drm_atomic_helper_prepare_planes(dev, state);
493 if (ret)
494 return ret;
495
496 /* Allocate the commit object. */
497 commit = kzalloc(sizeof(*commit), GFP_KERNEL);
498 if (commit == NULL)
499 return -ENOMEM;
500
501 INIT_WORK(&commit->work, rcar_du_atomic_work);
502 commit->dev = dev;
503 commit->state = state;
504
505 /* Wait until all affected CRTCs have completed previous commits and
506 * mark them as pending.
507 */
508 for (i = 0; i < dev->mode_config.num_crtc; ++i) {
509 if (state->crtcs[i])
510 commit->crtcs |= 1 << drm_crtc_index(state->crtcs[i]);
511 }
512
513 spin_lock(&rcdu->commit.wait.lock);
514 ret = wait_event_interruptible_locked(rcdu->commit.wait,
515 !(rcdu->commit.pending & commit->crtcs));
516 if (ret == 0)
517 rcdu->commit.pending |= commit->crtcs;
518 spin_unlock(&rcdu->commit.wait.lock);
519
520 if (ret) {
521 kfree(commit);
522 return ret;
523 }
524
525 /* Swap the state, this is the point of no return. */
526 drm_atomic_helper_swap_state(dev, state);
527
528 if (async)
529 schedule_work(&commit->work);
530 else
531 rcar_du_atomic_complete(commit);
532
533 return 0;
534 }
535
536 /* -----------------------------------------------------------------------------
537 * Initialization
538 */
539
/* Mode setting operations, wired into dev->mode_config.funcs at init time. */
static const struct drm_mode_config_funcs rcar_du_mode_config_funcs = {
	.fb_create = rcar_du_fb_create,
	.output_poll_changed = rcar_du_output_poll_changed,
	.atomic_check = rcar_du_atomic_check,
	.atomic_commit = rcar_du_atomic_commit,
};
546
/*
 * rcar_du_encoders_init_one - Initialize the encoder/connector pair for one
 * DU output endpoint
 *
 * Walk the OF graph from the DU endpoint @ep to locate the connected entity.
 * An entity with an endpoint other than the one facing the DU is treated as
 * an encoder (its connector is then the remote of that other endpoint);
 * otherwise the entity itself is the connector and no external encoder is
 * used.
 *
 * Returns 1 when an encoder was successfully initialized, 0 when the endpoint
 * was skipped (unconnected, missing connector, or unknown encoder type), or a
 * negative error code on failure.
 */
static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
				     enum rcar_du_output output,
				     struct of_endpoint *ep)
{
	/* Known external encoders, matched by compatible string. */
	static const struct {
		const char *compatible;
		enum rcar_du_encoder_type type;
	} encoders[] = {
		{ "adi,adv7123", RCAR_DU_ENCODER_VGA },
		{ "adi,adv7511w", RCAR_DU_ENCODER_HDMI },
		{ "thine,thc63lvdm83d", RCAR_DU_ENCODER_LVDS },
	};

	enum rcar_du_encoder_type enc_type = RCAR_DU_ENCODER_NONE;
	struct device_node *connector = NULL;
	struct device_node *encoder = NULL;
	struct device_node *ep_node = NULL;
	struct device_node *entity_ep_node;
	struct device_node *entity;
	int ret;

	/*
	 * Locate the connected entity and infer its type from the number of
	 * endpoints.
	 */
	entity = of_graph_get_remote_port_parent(ep->local_node);
	if (!entity) {
		dev_dbg(rcdu->dev, "unconnected endpoint %s, skipping\n",
			ep->local_node->full_name);
		return 0;
	}

	/* The entity's endpoint facing the DU, to be skipped below. */
	entity_ep_node = of_parse_phandle(ep->local_node, "remote-endpoint", 0);

	for_each_endpoint_of_node(entity, ep_node) {
		if (ep_node == entity_ep_node)
			continue;

		/*
		 * We've found one endpoint other than the input, this must
		 * be an encoder. Locate the connector.
		 */
		encoder = entity;
		connector = of_graph_get_remote_port_parent(ep_node);
		of_node_put(ep_node);

		if (!connector) {
			dev_warn(rcdu->dev,
				 "no connector for encoder %s, skipping\n",
				 encoder->full_name);
			of_node_put(entity_ep_node);
			of_node_put(encoder);
			return 0;
		}

		break;
	}

	of_node_put(entity_ep_node);

	if (encoder) {
		/*
		 * If an encoder has been found, get its type based on its
		 * compatible string.
		 */
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(encoders); ++i) {
			if (of_device_is_compatible(encoder,
						    encoders[i].compatible)) {
				enc_type = encoders[i].type;
				break;
			}
		}

		if (i == ARRAY_SIZE(encoders)) {
			dev_warn(rcdu->dev,
				 "unknown encoder type for %s, skipping\n",
				 encoder->full_name);
			of_node_put(encoder);
			of_node_put(connector);
			return 0;
		}
	} else {
		/*
		 * If no encoder has been found the entity must be the
		 * connector.
		 */
		connector = entity;
	}

	ret = rcar_du_encoder_init(rcdu, enc_type, output, encoder, connector);
	of_node_put(encoder);
	of_node_put(connector);

	return ret < 0 ? ret : 1;
}
644
/*
 * rcar_du_encoders_init - Create encoders for all DU output endpoints
 *
 * Iterate over the DU device node's OF graph endpoints, map each endpoint's
 * port number to a DU output route, and initialize one encoder per output
 * pipeline. Endpoints referencing unknown outputs or failing initialization
 * (other than -EPROBE_DEFER) are skipped with a warning.
 *
 * Returns the number of encoders successfully initialized, or a negative
 * error code on fatal failure.
 */
static int rcar_du_encoders_init(struct rcar_du_device *rcdu)
{
	struct device_node *np = rcdu->dev->of_node;
	struct device_node *ep_node;
	unsigned int num_encoders = 0;

	/*
	 * Iterate over the endpoints and create one encoder for each output
	 * pipeline.
	 */
	for_each_endpoint_of_node(np, ep_node) {
		enum rcar_du_output output;
		struct of_endpoint ep;
		unsigned int i;
		int ret;

		ret = of_graph_parse_endpoint(ep_node, &ep);
		if (ret < 0) {
			/* Drop the reference held by the iteration macro. */
			of_node_put(ep_node);
			return ret;
		}

		/* Find the output route corresponding to the port number. */
		for (i = 0; i < RCAR_DU_OUTPUT_MAX; ++i) {
			if (rcdu->info->routes[i].possible_crtcs &&
			    rcdu->info->routes[i].port == ep.port) {
				output = i;
				break;
			}
		}

		if (i == RCAR_DU_OUTPUT_MAX) {
			dev_warn(rcdu->dev,
				 "port %u references unexisting output, skipping\n",
				 ep.port);
			continue;
		}

		/* Process the output pipeline. */
		ret = rcar_du_encoders_init_one(rcdu, output, &ep);
		if (ret < 0) {
			if (ret == -EPROBE_DEFER) {
				of_node_put(ep_node);
				return ret;
			}

			dev_info(rcdu->dev,
				 "encoder initialization failed, skipping\n");
			continue;
		}

		num_encoders += ret;
	}

	return num_encoders;
}
701
702 static int rcar_du_properties_init(struct rcar_du_device *rcdu)
703 {
704 rcdu->props.alpha =
705 drm_property_create_range(rcdu->ddev, 0, "alpha", 0, 255);
706 if (rcdu->props.alpha == NULL)
707 return -ENOMEM;
708
709 /* The color key is expressed as an RGB888 triplet stored in a 32-bit
710 * integer in XRGB8888 format. Bit 24 is used as a flag to disable (0)
711 * or enable source color keying (1).
712 */
713 rcdu->props.colorkey =
714 drm_property_create_range(rcdu->ddev, 0, "colorkey",
715 0, 0x01ffffff);
716 if (rcdu->props.colorkey == NULL)
717 return -ENOMEM;
718
719 rcdu->props.zpos =
720 drm_property_create_range(rcdu->ddev, 0, "zpos", 1, 7);
721 if (rcdu->props.zpos == NULL)
722 return -ENOMEM;
723
724 return 0;
725 }
726
/*
 * rcar_du_modeset_init - Initialize mode setting for the DU
 *
 * Set up the DRM mode configuration, create the plane groups, CRTCs and
 * encoders, initialize fbdev emulation when at least one connector is
 * present, and start connector polling. Returns 0 on success or a negative
 * error code.
 */
int rcar_du_modeset_init(struct rcar_du_device *rcdu)
{
	/* Register offsets of the two possible CRTC groups (DU0/1 and DU2/3). */
	static const unsigned int mmio_offsets[] = {
		DU0_REG_OFFSET, DU2_REG_OFFSET
	};

	struct drm_device *dev = rcdu->ddev;
	struct drm_encoder *encoder;
	struct drm_fbdev_cma *fbdev;
	unsigned int num_encoders;
	unsigned int num_groups;
	unsigned int i;
	int ret;

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	dev->mode_config.max_width = 4095;
	dev->mode_config.max_height = 2047;
	dev->mode_config.funcs = &rcar_du_mode_config_funcs;

	rcdu->num_crtcs = rcdu->info->num_crtcs;

	ret = rcar_du_properties_init(rcdu);
	if (ret < 0)
		return ret;

	/* Initialize the groups (one group drives up to two CRTCs). */
	num_groups = DIV_ROUND_UP(rcdu->num_crtcs, 2);

	for (i = 0; i < num_groups; ++i) {
		struct rcar_du_group *rgrp = &rcdu->groups[i];

		mutex_init(&rgrp->lock);

		rgrp->dev = rcdu;
		rgrp->mmio_offset = mmio_offsets[i];
		rgrp->index = i;
		/* The last group has a single CRTC when num_crtcs is odd. */
		rgrp->num_crtcs = min(rcdu->num_crtcs - 2 * i, 2U);

		/* If we have more than one CRTCs in this group pre-associate
		 * planes 0-3 with CRTC 0 and planes 4-7 with CRTC 1 to minimize
		 * flicker occurring when the association is changed.
		 */
		rgrp->dptsr_planes = rgrp->num_crtcs > 1 ? 0xf0 : 0;

		ret = rcar_du_planes_init(rgrp);
		if (ret < 0)
			return ret;
	}

	/* Create the CRTCs. */
	for (i = 0; i < rcdu->num_crtcs; ++i) {
		struct rcar_du_group *rgrp = &rcdu->groups[i / 2];

		ret = rcar_du_crtc_create(rgrp, i);
		if (ret < 0)
			return ret;
	}

	/* Initialize the encoders. */
	ret = rcar_du_lvdsenc_init(rcdu);
	if (ret < 0)
		return ret;

	ret = rcar_du_encoders_init(rcdu);
	if (ret < 0)
		return ret;

	if (ret == 0) {
		dev_err(rcdu->dev, "error: no encoder could be initialized\n");
		return -EINVAL;
	}

	num_encoders = ret;

	/* Set the possible CRTCs and possible clones. There's always at least
	 * one way for all encoders to clone each other, set all bits in the
	 * possible clones field.
	 */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
		const struct rcar_du_output_routing *route =
			&rcdu->info->routes[renc->output];

		encoder->possible_crtcs = route->possible_crtcs;
		encoder->possible_clones = (1 << num_encoders) - 1;
	}

	drm_mode_config_reset(dev);

	drm_kms_helper_poll_init(dev);

	if (dev->mode_config.num_connector) {
		fbdev = drm_fbdev_cma_init(dev, 32, dev->mode_config.num_crtc,
					   dev->mode_config.num_connector);
		if (IS_ERR(fbdev))
			return PTR_ERR(fbdev);

		rcdu->fbdev = fbdev;
	} else {
		dev_info(rcdu->dev,
			 "no connector found, disabling fbdev emulation\n");
	}

	return 0;
}
This page took 0.061971 seconds and 5 git commands to generate.