drm: rcar-du: Fix race condition in hardware plane allocator
[deliverable/linux.git] / drivers / gpu / drm / rcar-du / rcar_du_kms.c
1 /*
2 * rcar_du_kms.c -- R-Car Display Unit Mode Setting
3 *
4 * Copyright (C) 2013-2014 Renesas Electronics Corporation
5 *
6 * Contact: Laurent Pinchart (laurent.pinchart@ideasonboard.com)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14 #include <drm/drmP.h>
15 #include <drm/drm_atomic.h>
16 #include <drm/drm_atomic_helper.h>
17 #include <drm/drm_crtc.h>
18 #include <drm/drm_crtc_helper.h>
19 #include <drm/drm_fb_cma_helper.h>
20 #include <drm/drm_gem_cma_helper.h>
21
22 #include <linux/of_graph.h>
23 #include <linux/wait.h>
24
25 #include "rcar_du_crtc.h"
26 #include "rcar_du_drv.h"
27 #include "rcar_du_encoder.h"
28 #include "rcar_du_kms.h"
29 #include "rcar_du_lvdsenc.h"
30 #include "rcar_du_regs.h"
31
32 /* -----------------------------------------------------------------------------
33 * Format helpers
34 */
35
/*
 * Pixel formats supported by the DU, in DRM FourCC terms, together with the
 * corresponding hardware configuration: bits per pixel, number of memory
 * planes, the PnMR data format field and the PnDDCR4 extended data format
 * field. Looked up by rcar_du_format_info().
 */
static const struct rcar_du_format_info rcar_du_format_infos[] = {
	{
		.fourcc = DRM_FORMAT_RGB565,
		.bpp = 16,
		.planes = 1,
		.pnmr = PnMR_SPIM_TP | PnMR_DDDF_16BPP,
		.edf = PnDDCR4_EDF_NONE,
	}, {
		.fourcc = DRM_FORMAT_ARGB1555,
		.bpp = 16,
		.planes = 1,
		.pnmr = PnMR_SPIM_ALP | PnMR_DDDF_ARGB,
		.edf = PnDDCR4_EDF_NONE,
	}, {
		.fourcc = DRM_FORMAT_XRGB1555,
		.bpp = 16,
		.planes = 1,
		.pnmr = PnMR_SPIM_ALP | PnMR_DDDF_ARGB,
		.edf = PnDDCR4_EDF_NONE,
	}, {
		/* NOTE(review): the 32-bpp RGB formats keep PnMR_DDDF_16BPP
		 * and presumably select the real format through the EDF field
		 * instead — confirm against the PnDDCR4 register documentation.
		 */
		.fourcc = DRM_FORMAT_XRGB8888,
		.bpp = 32,
		.planes = 1,
		.pnmr = PnMR_SPIM_TP | PnMR_DDDF_16BPP,
		.edf = PnDDCR4_EDF_RGB888,
	}, {
		.fourcc = DRM_FORMAT_ARGB8888,
		.bpp = 32,
		.planes = 1,
		.pnmr = PnMR_SPIM_ALP | PnMR_DDDF_16BPP,
		.edf = PnDDCR4_EDF_ARGB8888,
	}, {
		.fourcc = DRM_FORMAT_UYVY,
		.bpp = 16,
		.planes = 1,
		.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
		.edf = PnDDCR4_EDF_NONE,
	}, {
		.fourcc = DRM_FORMAT_YUYV,
		.bpp = 16,
		.planes = 1,
		.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
		.edf = PnDDCR4_EDF_NONE,
	}, {
		.fourcc = DRM_FORMAT_NV12,
		.bpp = 12,
		.planes = 2,
		.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
		.edf = PnDDCR4_EDF_NONE,
	}, {
		.fourcc = DRM_FORMAT_NV21,
		.bpp = 12,
		.planes = 2,
		.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
		.edf = PnDDCR4_EDF_NONE,
	}, {
		/* In YUV 4:2:2, only NV16 is supported (NV61 isn't) */
		.fourcc = DRM_FORMAT_NV16,
		.bpp = 16,
		.planes = 2,
		.pnmr = PnMR_SPIM_TP_OFF | PnMR_DDDF_YC,
		.edf = PnDDCR4_EDF_NONE,
	},
};
100
101 const struct rcar_du_format_info *rcar_du_format_info(u32 fourcc)
102 {
103 unsigned int i;
104
105 for (i = 0; i < ARRAY_SIZE(rcar_du_format_infos); ++i) {
106 if (rcar_du_format_infos[i].fourcc == fourcc)
107 return &rcar_du_format_infos[i];
108 }
109
110 return NULL;
111 }
112
113 /* -----------------------------------------------------------------------------
114 * Frame buffer
115 */
116
117 int rcar_du_dumb_create(struct drm_file *file, struct drm_device *dev,
118 struct drm_mode_create_dumb *args)
119 {
120 struct rcar_du_device *rcdu = dev->dev_private;
121 unsigned int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
122 unsigned int align;
123
124 /* The R8A7779 DU requires a 16 pixels pitch alignment as documented,
125 * but the R8A7790 DU seems to require a 128 bytes pitch alignment.
126 */
127 if (rcar_du_needs(rcdu, RCAR_DU_QUIRK_ALIGN_128B))
128 align = 128;
129 else
130 align = 16 * args->bpp / 8;
131
132 args->pitch = roundup(min_pitch, align);
133
134 return drm_gem_cma_dumb_create_internal(file, dev, args);
135 }
136
137 static struct drm_framebuffer *
138 rcar_du_fb_create(struct drm_device *dev, struct drm_file *file_priv,
139 struct drm_mode_fb_cmd2 *mode_cmd)
140 {
141 struct rcar_du_device *rcdu = dev->dev_private;
142 const struct rcar_du_format_info *format;
143 unsigned int max_pitch;
144 unsigned int align;
145 unsigned int bpp;
146
147 format = rcar_du_format_info(mode_cmd->pixel_format);
148 if (format == NULL) {
149 dev_dbg(dev->dev, "unsupported pixel format %08x\n",
150 mode_cmd->pixel_format);
151 return ERR_PTR(-EINVAL);
152 }
153
154 /*
155 * The pitch and alignment constraints are expressed in pixels on the
156 * hardware side and in bytes in the DRM API.
157 */
158 bpp = format->planes == 2 ? 1 : format->bpp / 8;
159 max_pitch = 4096 * bpp;
160
161 if (rcar_du_needs(rcdu, RCAR_DU_QUIRK_ALIGN_128B))
162 align = 128;
163 else
164 align = 16 * bpp;
165
166 if (mode_cmd->pitches[0] & (align - 1) ||
167 mode_cmd->pitches[0] >= max_pitch) {
168 dev_dbg(dev->dev, "invalid pitch value %u\n",
169 mode_cmd->pitches[0]);
170 return ERR_PTR(-EINVAL);
171 }
172
173 if (format->planes == 2) {
174 if (mode_cmd->pitches[1] != mode_cmd->pitches[0]) {
175 dev_dbg(dev->dev,
176 "luma and chroma pitches do not match\n");
177 return ERR_PTR(-EINVAL);
178 }
179 }
180
181 return drm_fb_cma_create(dev, file_priv, mode_cmd);
182 }
183
/* Forward output-change (hotplug) notifications to the fbdev emulation. */
static void rcar_du_output_poll_changed(struct drm_device *dev)
{
	struct rcar_du_device *rcdu = dev->dev_private;

	drm_fbdev_cma_hotplug_event(rcdu->fbdev);
}
190
191 /* -----------------------------------------------------------------------------
192 * Atomic Check and Update
193 */
194
195 /*
196 * Atomic hardware plane allocator
197 *
198 * The hardware plane allocator is solely based on the atomic plane states
199 * without keeping any external state to avoid races between .atomic_check()
200 * and .atomic_commit().
201 *
202 * The core idea is to avoid using a free planes bitmask that would need to be
203 * shared between check and commit handlers with a collective knowledge based on
204 * the allocated hardware plane(s) for each KMS plane. The allocator then loops
205 * over all plane states to compute the free planes bitmask, allocates hardware
206 * planes based on that bitmask, and stores the result back in the plane states.
207 *
208 * For this to work we need to access the current state of planes not touched by
209 * the atomic update. To ensure that it won't be modified, we need to lock all
210 * planes using drm_atomic_get_plane_state(). This effectively serializes atomic
211 * updates from .atomic_check() up to completion (when swapping the states if
212 * the check step has succeeded) or rollback (when freeing the states if the
213 * check step has failed).
214 *
215 * Allocation is performed in the .atomic_check() handler and applied
216 * automatically when the core swaps the old and new states.
217 */
218
219 static bool rcar_du_plane_needs_realloc(struct rcar_du_plane *plane,
220 struct rcar_du_plane_state *state)
221 {
222 const struct rcar_du_format_info *cur_format;
223
224 cur_format = to_rcar_du_plane_state(plane->plane.state)->format;
225
226 /* Lowering the number of planes doesn't strictly require reallocation
227 * as the extra hardware plane will be freed when committing, but doing
228 * so could lead to more fragmentation.
229 */
230 return !cur_format || cur_format->planes != state->format->planes;
231 }
232
233 static unsigned int rcar_du_plane_hwmask(struct rcar_du_plane_state *state)
234 {
235 unsigned int mask;
236
237 if (state->hwindex == -1)
238 return 0;
239
240 mask = 1 << state->hwindex;
241 if (state->format->planes == 2)
242 mask |= 1 << ((state->hwindex + 1) % 8);
243
244 return mask;
245 }
246
247 static int rcar_du_plane_hwalloc(unsigned int num_planes, unsigned int free)
248 {
249 unsigned int i;
250
251 for (i = 0; i < RCAR_DU_NUM_HW_PLANES; ++i) {
252 if (!(free & (1 << i)))
253 continue;
254
255 if (num_planes == 1 || free & (1 << ((i + 1) % 8)))
256 break;
257 }
258
259 return i == RCAR_DU_NUM_HW_PLANES ? -EBUSY : i;
260 }
261
/*
 * rcar_du_atomic_check - .atomic_check() mode config handler
 * @dev: the DRM device
 * @state: the atomic state being checked
 *
 * Run the core atomic checks, then (re)allocate hardware planes for the KMS
 * planes touched by the update, as described in the "Atomic hardware plane
 * allocator" comment above. The computed hardware plane indices are stored
 * in the new plane states and take effect when the core swaps states.
 *
 * Return 0 on success, a negative error code on failure (-EBUSY when no
 * hardware plane is available).
 */
static int rcar_du_atomic_check(struct drm_device *dev,
				struct drm_atomic_state *state)
{
	struct rcar_du_device *rcdu = dev->dev_private;
	/* Per-group bitmask of KMS planes freed by this update. */
	unsigned int group_freed_planes[RCAR_DU_MAX_GROUPS] = { 0, };
	/* Per-group bitmask of free hardware planes. */
	unsigned int group_free_planes[RCAR_DU_MAX_GROUPS] = { 0, };
	bool needs_realloc = false;
	unsigned int groups = 0;
	unsigned int i;
	int ret;

	ret = drm_atomic_helper_check(dev, state);
	if (ret < 0)
		return ret;

	/* Check if hardware planes need to be reallocated. */
	for (i = 0; i < dev->mode_config.num_total_plane; ++i) {
		struct rcar_du_plane_state *plane_state;
		struct rcar_du_plane *plane;
		unsigned int index;

		/* Only planes included in this atomic update are considered. */
		if (!state->planes[i])
			continue;

		plane = to_rcar_plane(state->planes[i]);
		plane_state = to_rcar_du_plane_state(state->plane_states[i]);

		/* If the plane is being disabled we don't need to go through
		 * the full reallocation procedure. Just mark the hardware
		 * plane(s) as freed.
		 */
		if (!plane_state->format) {
			index = plane - plane->group->planes.planes;
			group_freed_planes[plane->group->index] |= 1 << index;
			plane_state->hwindex = -1;
			continue;
		}

		/* If the plane needs to be reallocated mark it as such, and
		 * mark the hardware plane(s) as free.
		 */
		if (rcar_du_plane_needs_realloc(plane, plane_state)) {
			groups |= 1 << plane->group->index;
			needs_realloc = true;

			index = plane - plane->group->planes.planes;
			group_freed_planes[plane->group->index] |= 1 << index;
			plane_state->hwindex = -1;
		}
	}

	if (!needs_realloc)
		return 0;

	/* Grab all plane states for the groups that need reallocation to ensure
	 * locking and avoid racy updates. This serializes the update operation,
	 * but there's not much we can do about it as that's the hardware
	 * design.
	 *
	 * Compute the used planes mask for each group at the same time to avoid
	 * looping over the planes separately later.
	 */
	while (groups) {
		unsigned int index = ffs(groups) - 1;
		struct rcar_du_group *group = &rcdu->groups[index];
		unsigned int used_planes = 0;

		for (i = 0; i < RCAR_DU_NUM_KMS_PLANES; ++i) {
			struct rcar_du_plane *plane = &group->planes.planes[i];
			struct rcar_du_plane_state *plane_state;
			struct drm_plane_state *s;

			/* Locks the plane, serializing concurrent commits. */
			s = drm_atomic_get_plane_state(state, &plane->plane);
			if (IS_ERR(s))
				return PTR_ERR(s);

			/* If the plane has been freed in the above loop its
			 * hardware planes must not be added to the used planes
			 * bitmask. However, the current state doesn't reflect
			 * the free state yet, as we've modified the new state
			 * above. Use the local freed planes list to check for
			 * that condition instead.
			 */
			if (group_freed_planes[index] & (1 << i))
				continue;

			plane_state = to_rcar_du_plane_state(plane->plane.state);
			used_planes |= rcar_du_plane_hwmask(plane_state);
		}

		group_free_planes[index] = 0xff & ~used_planes;
		groups &= ~(1 << index);
	}

	/* Reallocate hardware planes for each plane that needs it. */
	for (i = 0; i < dev->mode_config.num_total_plane; ++i) {
		struct rcar_du_plane_state *plane_state;
		struct rcar_du_plane *plane;
		int idx;

		if (!state->planes[i])
			continue;

		plane = to_rcar_plane(state->planes[i]);
		plane_state = to_rcar_du_plane_state(state->plane_states[i]);

		/* Skip planes that are being disabled or don't need to be
		 * reallocated.
		 */
		if (!plane_state->format ||
		    !rcar_du_plane_needs_realloc(plane, plane_state))
			continue;

		idx = rcar_du_plane_hwalloc(plane_state->format->planes,
					group_free_planes[plane->group->index]);
		if (idx < 0) {
			dev_dbg(rcdu->dev, "%s: no available hardware plane\n",
				__func__);
			return idx;
		}

		plane_state->hwindex = idx;

		group_free_planes[plane->group->index] &=
			~rcar_du_plane_hwmask(plane_state);
	}

	return 0;
}
391
/*
 * struct rcar_du_commit - Context for one atomic commit
 * @work: work item used to complete the commit asynchronously
 * @dev: the DRM device
 * @state: the atomic state being committed
 * @crtcs: bitmask of the CRTCs affected by this commit
 */
struct rcar_du_commit {
	struct work_struct work;
	struct drm_device *dev;
	struct drm_atomic_state *state;
	u32 crtcs;
};
398
/*
 * rcar_du_atomic_complete - Apply a committed atomic state to the hardware
 * @commit: the commit context
 *
 * Called either directly for synchronous commits or from the commit work
 * item for asynchronous ones. Programs the new state, waits for the update
 * to take effect, releases the old state, then clears the affected CRTCs
 * from the pending mask and wakes up commits waiting on them. Frees @commit.
 */
static void rcar_du_atomic_complete(struct rcar_du_commit *commit)
{
	struct drm_device *dev = commit->dev;
	struct rcar_du_device *rcdu = dev->dev_private;
	struct drm_atomic_state *old_state = commit->state;

	/* Apply the atomic update. */
	drm_atomic_helper_commit_modeset_disables(dev, old_state);
	drm_atomic_helper_commit_modeset_enables(dev, old_state);
	drm_atomic_helper_commit_planes(dev, old_state);

	/* Wait for vblank before cleaning up the planes of the old state. */
	drm_atomic_helper_wait_for_vblanks(dev, old_state);

	drm_atomic_helper_cleanup_planes(dev, old_state);

	drm_atomic_state_free(old_state);

	/* Complete the commit, wake up any waiter. */
	spin_lock(&rcdu->commit.wait.lock);
	rcdu->commit.pending &= ~commit->crtcs;
	wake_up_all_locked(&rcdu->commit.wait);
	spin_unlock(&rcdu->commit.wait.lock);

	kfree(commit);
}
424
425 static void rcar_du_atomic_work(struct work_struct *work)
426 {
427 struct rcar_du_commit *commit =
428 container_of(work, struct rcar_du_commit, work);
429
430 rcar_du_atomic_complete(commit);
431 }
432
433 static int rcar_du_atomic_commit(struct drm_device *dev,
434 struct drm_atomic_state *state, bool async)
435 {
436 struct rcar_du_device *rcdu = dev->dev_private;
437 struct rcar_du_commit *commit;
438 unsigned int i;
439 int ret;
440
441 ret = drm_atomic_helper_prepare_planes(dev, state);
442 if (ret)
443 return ret;
444
445 /* Allocate the commit object. */
446 commit = kzalloc(sizeof(*commit), GFP_KERNEL);
447 if (commit == NULL)
448 return -ENOMEM;
449
450 INIT_WORK(&commit->work, rcar_du_atomic_work);
451 commit->dev = dev;
452 commit->state = state;
453
454 /* Wait until all affected CRTCs have completed previous commits and
455 * mark them as pending.
456 */
457 for (i = 0; i < dev->mode_config.num_crtc; ++i) {
458 if (state->crtcs[i])
459 commit->crtcs |= 1 << drm_crtc_index(state->crtcs[i]);
460 }
461
462 spin_lock(&rcdu->commit.wait.lock);
463 ret = wait_event_interruptible_locked(rcdu->commit.wait,
464 !(rcdu->commit.pending & commit->crtcs));
465 if (ret == 0)
466 rcdu->commit.pending |= commit->crtcs;
467 spin_unlock(&rcdu->commit.wait.lock);
468
469 if (ret) {
470 kfree(commit);
471 return ret;
472 }
473
474 /* Swap the state, this is the point of no return. */
475 drm_atomic_helper_swap_state(dev, state);
476
477 if (async)
478 schedule_work(&commit->work);
479 else
480 rcar_du_atomic_complete(commit);
481
482 return 0;
483 }
484
485 /* -----------------------------------------------------------------------------
486 * Initialization
487 */
488
/* DRM mode configuration operations for the R-Car DU. */
static const struct drm_mode_config_funcs rcar_du_mode_config_funcs = {
	.fb_create = rcar_du_fb_create,
	.output_poll_changed = rcar_du_output_poll_changed,
	.atomic_check = rcar_du_atomic_check,
	.atomic_commit = rcar_du_atomic_commit,
};
495
/*
 * rcar_du_encoders_init_one - Initialize one output pipeline from DT
 * @rcdu: the DU device
 * @output: the DU output this endpoint drives
 * @ep: the DU endpoint to walk from
 *
 * Follow the OF graph from @ep to find the connected encoder (if any) and
 * connector, then register the encoder through rcar_du_encoder_init().
 *
 * Return 1 when an encoder was initialized, 0 when the endpoint was skipped
 * (unconnected, missing connector, or unknown encoder type), or a negative
 * error code on failure.
 */
static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
				     enum rcar_du_output output,
				     struct of_endpoint *ep)
{
	/* Known external encoder chips and the encoder type they map to. */
	static const struct {
		const char *compatible;
		enum rcar_du_encoder_type type;
	} encoders[] = {
		{ "adi,adv7123", RCAR_DU_ENCODER_VGA },
		{ "adi,adv7511w", RCAR_DU_ENCODER_HDMI },
		{ "thine,thc63lvdm83d", RCAR_DU_ENCODER_LVDS },
	};

	enum rcar_du_encoder_type enc_type = RCAR_DU_ENCODER_NONE;
	struct device_node *connector = NULL;
	struct device_node *encoder = NULL;
	struct device_node *prev = NULL;
	struct device_node *entity_ep_node;
	struct device_node *entity;
	int ret;

	/*
	 * Locate the connected entity and infer its type from the number of
	 * endpoints.
	 */
	entity = of_graph_get_remote_port_parent(ep->local_node);
	if (!entity) {
		dev_dbg(rcdu->dev, "unconnected endpoint %s, skipping\n",
			ep->local_node->full_name);
		return 0;
	}

	/* Remote endpoint on the entity side, to skip it when iterating. */
	entity_ep_node = of_parse_phandle(ep->local_node, "remote-endpoint", 0);

	while (1) {
		struct device_node *ep_node;

		/* of_graph_get_next_endpoint() takes a reference on ep_node;
		 * the previous node's reference is dropped each iteration.
		 */
		ep_node = of_graph_get_next_endpoint(entity, prev);
		of_node_put(prev);
		prev = ep_node;

		if (!ep_node)
			break;

		if (ep_node == entity_ep_node)
			continue;

		/*
		 * We've found one endpoint other than the input, this must
		 * be an encoder. Locate the connector.
		 */
		encoder = entity;
		connector = of_graph_get_remote_port_parent(ep_node);
		of_node_put(ep_node);

		if (!connector) {
			dev_warn(rcdu->dev,
				 "no connector for encoder %s, skipping\n",
				 encoder->full_name);
			of_node_put(entity_ep_node);
			of_node_put(encoder);
			return 0;
		}

		break;
	}

	of_node_put(entity_ep_node);

	if (encoder) {
		/*
		 * If an encoder has been found, get its type based on its
		 * compatible string.
		 */
		unsigned int i;

		for (i = 0; i < ARRAY_SIZE(encoders); ++i) {
			if (of_device_is_compatible(encoder,
						    encoders[i].compatible)) {
				enc_type = encoders[i].type;
				break;
			}
		}

		if (i == ARRAY_SIZE(encoders)) {
			dev_warn(rcdu->dev,
				 "unknown encoder type for %s, skipping\n",
				 encoder->full_name);
			of_node_put(encoder);
			of_node_put(connector);
			return 0;
		}
	} else {
		/*
		 * If no encoder has been found the entity must be the
		 * connector.
		 */
		connector = entity;
	}

	/* Drop our references after initialization; the reference taken on
	 * entity is released through either encoder or connector here.
	 */
	ret = rcar_du_encoder_init(rcdu, enc_type, output, encoder, connector);
	of_node_put(encoder);
	of_node_put(connector);

	return ret < 0 ? ret : 1;
}
602
/*
 * rcar_du_encoders_init - Initialize all output pipelines described in DT
 * @rcdu: the DU device
 *
 * Iterate over the DU node's OF graph endpoints and initialize one encoder
 * per output pipeline through rcar_du_encoders_init_one(). Endpoints whose
 * port doesn't map to a valid output, or whose encoder fails to initialize
 * (other than with -EPROBE_DEFER), are skipped.
 *
 * Return the number of successfully initialized encoders, or a negative
 * error code on fatal failure.
 */
static int rcar_du_encoders_init(struct rcar_du_device *rcdu)
{
	struct device_node *np = rcdu->dev->of_node;
	struct device_node *prev = NULL;
	unsigned int num_encoders = 0;

	/*
	 * Iterate over the endpoints and create one encoder for each output
	 * pipeline.
	 */
	while (1) {
		struct device_node *ep_node;
		enum rcar_du_output output;
		struct of_endpoint ep;
		unsigned int i;
		int ret;

		ep_node = of_graph_get_next_endpoint(np, prev);
		of_node_put(prev);
		prev = ep_node;

		if (ep_node == NULL)
			break;

		ret = of_graph_parse_endpoint(ep_node, &ep);
		if (ret < 0) {
			of_node_put(ep_node);
			return ret;
		}

		/* Find the output route corresponding to the port number. */
		for (i = 0; i < RCAR_DU_OUTPUT_MAX; ++i) {
			if (rcdu->info->routes[i].possible_crtcs &&
			    rcdu->info->routes[i].port == ep.port) {
				output = i;
				break;
			}
		}

		/* output is only used below when the lookup succeeded. */
		if (i == RCAR_DU_OUTPUT_MAX) {
			dev_warn(rcdu->dev,
				 "port %u references unexisting output, skipping\n",
				 ep.port);
			continue;
		}

		/* Process the output pipeline. */
		ret = rcar_du_encoders_init_one(rcdu, output, &ep);
		if (ret < 0) {
			/* Probe deferral must be propagated so the driver is
			 * retried once the missing resource shows up.
			 */
			if (ret == -EPROBE_DEFER) {
				of_node_put(ep_node);
				return ret;
			}

			dev_info(rcdu->dev,
				 "encoder initialization failed, skipping\n");
			continue;
		}

		num_encoders += ret;
	}

	return num_encoders;
}
667
/*
 * rcar_du_modeset_init - Initialize KMS for the DU device
 * @rcdu: the DU device
 *
 * Set up the DRM mode configuration, plane groups, CRTCs, encoders and
 * (when at least one connector exists) fbdev emulation.
 *
 * NOTE(review): the error paths return without unwinding earlier steps
 * (e.g. drm_mode_config_init()); presumably the caller's teardown path
 * handles this — confirm against the driver's unload code.
 *
 * Return 0 on success, a negative error code on failure.
 */
int rcar_du_modeset_init(struct rcar_du_device *rcdu)
{
	/* Register offsets of the two possible groups (DU0/1 and DU2/3). */
	static const unsigned int mmio_offsets[] = {
		DU0_REG_OFFSET, DU2_REG_OFFSET
	};

	struct drm_device *dev = rcdu->ddev;
	struct drm_encoder *encoder;
	struct drm_fbdev_cma *fbdev;
	unsigned int num_encoders;
	unsigned int num_groups;
	unsigned int i;
	int ret;

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	dev->mode_config.max_width = 4095;
	dev->mode_config.max_height = 2047;
	dev->mode_config.funcs = &rcar_du_mode_config_funcs;

	rcdu->num_crtcs = rcdu->info->num_crtcs;

	/* Initialize the groups: each group drives up to two CRTCs. */
	num_groups = DIV_ROUND_UP(rcdu->num_crtcs, 2);

	for (i = 0; i < num_groups; ++i) {
		struct rcar_du_group *rgrp = &rcdu->groups[i];

		mutex_init(&rgrp->lock);

		rgrp->dev = rcdu;
		rgrp->mmio_offset = mmio_offsets[i];
		rgrp->index = i;

		ret = rcar_du_planes_init(rgrp);
		if (ret < 0)
			return ret;
	}

	/* Create the CRTCs. */
	for (i = 0; i < rcdu->num_crtcs; ++i) {
		struct rcar_du_group *rgrp = &rcdu->groups[i / 2];

		ret = rcar_du_crtc_create(rgrp, i);
		if (ret < 0)
			return ret;
	}

	/* Initialize the encoders. */
	ret = rcar_du_lvdsenc_init(rcdu);
	if (ret < 0)
		return ret;

	ret = rcar_du_encoders_init(rcdu);
	if (ret < 0)
		return ret;

	/* At least one encoder is required for a usable device. */
	if (ret == 0) {
		dev_err(rcdu->dev, "error: no encoder could be initialized\n");
		return -EINVAL;
	}

	num_encoders = ret;

	/* Set the possible CRTCs and possible clones. There's always at least
	 * one way for all encoders to clone each other, set all bits in the
	 * possible clones field.
	 */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		struct rcar_du_encoder *renc = to_rcar_encoder(encoder);
		const struct rcar_du_output_routing *route =
			&rcdu->info->routes[renc->output];

		encoder->possible_crtcs = route->possible_crtcs;
		encoder->possible_clones = (1 << num_encoders) - 1;
	}

	drm_mode_config_reset(dev);

	drm_kms_helper_poll_init(dev);

	/* fbdev emulation is only useful when a connector exists. */
	if (dev->mode_config.num_connector) {
		fbdev = drm_fbdev_cma_init(dev, 32, dev->mode_config.num_crtc,
					   dev->mode_config.num_connector);
		if (IS_ERR(fbdev))
			return PTR_ERR(fbdev);

		rcdu->fbdev = fbdev;
	} else {
		dev_info(rcdu->dev,
			 "no connector found, disabling fbdev emulation\n");
	}

	return 0;
}
This page took 0.048812 seconds and 5 git commands to generate.