OMAPDSS: hide manager's enable/disable()
drivers/video/omap2/dss/apply.c
/*
 * Copyright (C) 2011 Texas Instruments
 * Author: Tomi Valkeinen <tomi.valkeinen@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#define DSS_SUBSYS_NAME "APPLY"

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>

#include <video/omapdss.h>

#include "dss.h"
#include "dss_features.h"

/*
 * We have 4 levels of cache for the dispc settings. First two are in SW and
 * the latter two in HW.
 *
 * +--------------------+
 * |overlay/manager_info|
 * +--------------------+
 *          v
 *        apply()
 *          v
 * +--------------------+
 * |     dss_cache      |
 * +--------------------+
 *          v
 *      configure()
 *          v
 * +--------------------+
 * |  shadow registers  |
 * +--------------------+
 *          v
 * VFP or lcd/digit_enable
 *          v
 * +--------------------+
 * |     registers      |
 * +--------------------+
 */

struct overlay_cache_data {
        /* If true, cache changed, but not written to shadow registers. Set
         * in apply(), cleared when registers written. */
        bool dirty;
        /* If true, shadow registers contain changed values not yet in real
         * registers. Set when writing to shadow registers, cleared at
         * VSYNC/EVSYNC */
        bool shadow_dirty;

        bool enabled;

        struct omap_overlay_info info;

        enum omap_channel channel;

        u32 fifo_low;
        u32 fifo_high;
};

struct manager_cache_data {
        /* If true, cache changed, but not written to shadow registers. Set
         * in apply(), cleared when registers written. */
        bool dirty;
        /* If true, shadow registers contain changed values not yet in real
         * registers. Set when writing to shadow registers, cleared at
         * VSYNC/EVSYNC */
        bool shadow_dirty;

        struct omap_overlay_manager_info info;

        bool manual_update;
        bool do_manual_update;
};

static struct {
        spinlock_t lock;
        struct overlay_cache_data overlay_cache[MAX_DSS_OVERLAYS];
        struct manager_cache_data manager_cache[MAX_DSS_MANAGERS];

        bool irq_enabled;
} dss_cache;

void dss_apply_init(void)
{
        spin_lock_init(&dss_cache.lock);
}

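/*
 * Manual-update displays (e.g. DSI command mode panels) do not latch new
 * settings at VSYNC; the cached configuration reaches the hardware only when
 * the driver explicitly starts an update. The helpers below check the display
 * capabilities so that apply() and the IRQ handler can skip such managers.
 */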
static bool ovl_manual_update(struct omap_overlay *ovl)
{
        return ovl->manager->device->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
}

static bool mgr_manual_update(struct omap_overlay_manager *mgr)
{
        return mgr->device->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
}

static int overlay_enabled(struct omap_overlay *ovl)
{
        return ovl->info.enabled && ovl->manager && ovl->manager->device;
}

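/*
 * dss_mgr_wait_for_go() blocks until the manager's cached and shadow
 * configuration has reached the hardware registers, i.e. until both the
 * dirty and shadow_dirty flags have been cleared.
 */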
int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr)
{
        unsigned long timeout = msecs_to_jiffies(500);
        struct manager_cache_data *mc;
        u32 irq;
        int r;
        int i;
        struct omap_dss_device *dssdev = mgr->device;

        if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
                return 0;

        if (mgr_manual_update(mgr))
                return 0;

        if (dssdev->type == OMAP_DISPLAY_TYPE_VENC
                        || dssdev->type == OMAP_DISPLAY_TYPE_HDMI) {
                irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
        } else {
                irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ?
                        DISPC_IRQ_VSYNC : DISPC_IRQ_VSYNC2;
        }

        mc = &dss_cache.manager_cache[mgr->id];
        i = 0;
        while (1) {
                unsigned long flags;
                bool shadow_dirty, dirty;

                spin_lock_irqsave(&dss_cache.lock, flags);
                dirty = mc->dirty;
                shadow_dirty = mc->shadow_dirty;
                spin_unlock_irqrestore(&dss_cache.lock, flags);

                if (!dirty && !shadow_dirty) {
                        r = 0;
                        break;
                }

                /* 4 iterations is the worst case:
                 * 1 - initial iteration, dirty = true (between VFP and VSYNC)
                 * 2 - first VSYNC, dirty = true
                 * 3 - dirty = false, shadow_dirty = true
                 * 4 - shadow_dirty = false */
                if (i++ == 3) {
                        DSSERR("mgr(%d)->wait_for_go() not finishing\n",
                                        mgr->id);
                        r = 0;
                        break;
                }

                r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
                if (r == -ERESTARTSYS)
                        break;

                if (r) {
                        DSSERR("mgr(%d)->wait_for_go() timeout\n", mgr->id);
                        break;
                }
        }

        return r;
}

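/*
 * Same as dss_mgr_wait_for_go(), but waits on the cache flags of a single
 * overlay instead of the whole manager.
 */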
int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl)
{
        unsigned long timeout = msecs_to_jiffies(500);
        struct overlay_cache_data *oc;
        struct omap_dss_device *dssdev;
        u32 irq;
        int r;
        int i;

        if (!ovl->manager)
                return 0;

        dssdev = ovl->manager->device;

        if (!dssdev || dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
                return 0;

        if (ovl_manual_update(ovl))
                return 0;

        if (dssdev->type == OMAP_DISPLAY_TYPE_VENC
                        || dssdev->type == OMAP_DISPLAY_TYPE_HDMI) {
                irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
        } else {
                irq = (dssdev->manager->id == OMAP_DSS_CHANNEL_LCD) ?
                        DISPC_IRQ_VSYNC : DISPC_IRQ_VSYNC2;
        }

        oc = &dss_cache.overlay_cache[ovl->id];
        i = 0;
        while (1) {
                unsigned long flags;
                bool shadow_dirty, dirty;

                spin_lock_irqsave(&dss_cache.lock, flags);
                dirty = oc->dirty;
                shadow_dirty = oc->shadow_dirty;
                spin_unlock_irqrestore(&dss_cache.lock, flags);

                if (!dirty && !shadow_dirty) {
                        r = 0;
                        break;
                }

                /* 4 iterations is the worst case:
                 * 1 - initial iteration, dirty = true (between VFP and VSYNC)
                 * 2 - first VSYNC, dirty = true
                 * 3 - dirty = false, shadow_dirty = true
                 * 4 - shadow_dirty = false */
                if (i++ == 3) {
                        DSSERR("ovl(%d)->wait_for_go() not finishing\n",
                                        ovl->id);
                        r = 0;
                        break;
                }

                r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
                if (r == -ERESTARTSYS)
                        break;

                if (r) {
                        DSSERR("ovl(%d)->wait_for_go() timeout\n", ovl->id);
                        break;
                }
        }

        return r;
}

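/*
 * Write one overlay's cached configuration to the dispc shadow registers.
 * A disabled overlay is simply turned off; otherwise the output channel,
 * overlay setup and FIFO thresholds are programmed and the plane is enabled.
 */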
static int configure_overlay(enum omap_plane plane)
{
        struct omap_overlay *ovl;
        struct overlay_cache_data *c;
        struct omap_overlay_info *oi;
        bool ilace, replication;
        int r;

        DSSDBGF("%d", plane);

        c = &dss_cache.overlay_cache[plane];
        oi = &c->info;

        if (!c->enabled) {
                dispc_ovl_enable(plane, 0);
                return 0;
        }

        ovl = omap_dss_get_overlay(plane);

        replication = dss_use_replication(ovl->manager->device, oi->color_mode);

        ilace = ovl->manager->device->type == OMAP_DISPLAY_TYPE_VENC;

        dispc_ovl_set_channel_out(plane, c->channel);

        r = dispc_ovl_setup(plane, oi, ilace, replication);
        if (r) {
                /* this shouldn't happen */
                DSSERR("dispc_ovl_setup failed for ovl %d\n", plane);
                dispc_ovl_enable(plane, 0);
                return r;
        }

        dispc_ovl_set_fifo_threshold(plane, c->fifo_low, c->fifo_high);

        dispc_ovl_enable(plane, 1);

        return 0;
}

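/* Write a manager's cached configuration to the dispc shadow registers. */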
static void configure_manager(enum omap_channel channel)
{
        struct omap_overlay_manager_info *mi;

        DSSDBGF("%d", channel);

        /* picking info from the cache */
        mi = &dss_cache.manager_cache[channel].info;

        dispc_mgr_setup(channel, mi);
}

/* configure_dispc() tries to write values from cache to shadow registers.
 * It writes only to those managers/overlays that are not busy.
 * returns 0 if everything could be written to shadow registers.
 * returns 1 if not everything could be written to shadow registers. */
static int configure_dispc(void)
{
        struct overlay_cache_data *oc;
        struct manager_cache_data *mc;
        const int num_ovls = dss_feat_get_num_ovls();
        const int num_mgrs = dss_feat_get_num_mgrs();
        int i;
        int r;
        bool mgr_busy[MAX_DSS_MANAGERS];
        bool mgr_go[MAX_DSS_MANAGERS];
        bool busy;

        r = 0;
        busy = false;

        for (i = 0; i < num_mgrs; i++) {
                mgr_busy[i] = dispc_mgr_go_busy(i);
                mgr_go[i] = false;
        }

        /* Commit overlay settings */
        for (i = 0; i < num_ovls; ++i) {
                oc = &dss_cache.overlay_cache[i];
                mc = &dss_cache.manager_cache[oc->channel];

                if (!oc->dirty)
                        continue;

                if (mc->manual_update && !mc->do_manual_update)
                        continue;

                if (mgr_busy[oc->channel]) {
                        busy = true;
                        continue;
                }

                r = configure_overlay(i);
                if (r)
                        DSSERR("configure_overlay %d failed\n", i);

                oc->dirty = false;
                oc->shadow_dirty = true;
                mgr_go[oc->channel] = true;
        }

        /* Commit manager settings */
        for (i = 0; i < num_mgrs; ++i) {
                mc = &dss_cache.manager_cache[i];

                if (!mc->dirty)
                        continue;

                if (mc->manual_update && !mc->do_manual_update)
                        continue;

                if (mgr_busy[i]) {
                        busy = true;
                        continue;
                }

                configure_manager(i);
                mc->dirty = false;
                mc->shadow_dirty = true;
                mgr_go[i] = true;
        }

        /* set GO */
        for (i = 0; i < num_mgrs; ++i) {
                mc = &dss_cache.manager_cache[i];

                if (!mgr_go[i])
                        continue;

                /* We don't need GO with a manual update display. The LCD iface
                 * will always be turned off after the frame, and the new
                 * settings will be taken into use at the next update */
                if (!mc->manual_update)
                        dispc_mgr_go(i);
        }

        if (busy)
                r = 1;
        else
                r = 0;

        return r;
}

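/*
 * dss_mgr_start_update() is used for manual-update displays: it forces the
 * dirty cache entries for this manager into the hardware, clears the
 * shadow_dirty flags (there is no VSYNC to do it for us) and then enables
 * the manager, which starts the frame transfer on a manual-update interface.
 */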
void dss_mgr_start_update(struct omap_overlay_manager *mgr)
{
        struct manager_cache_data *mc;
        struct overlay_cache_data *oc;
        const int num_ovls = dss_feat_get_num_ovls();
        const int num_mgrs = dss_feat_get_num_mgrs();
        int i;

        mc = &dss_cache.manager_cache[mgr->id];

        mc->do_manual_update = true;
        configure_dispc();
        mc->do_manual_update = false;

        for (i = 0; i < num_ovls; ++i) {
                oc = &dss_cache.overlay_cache[i];
                if (oc->channel != mgr->id)
                        continue;

                oc->shadow_dirty = false;
        }

        for (i = 0; i < num_mgrs; ++i) {
                mc = &dss_cache.manager_cache[i];
                if (mgr->id != i)
                        continue;

                mc->shadow_dirty = false;
        }

        dispc_mgr_enable(mgr->id, true);
}

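/*
 * VSYNC/EVSYNC interrupt handler: once a manager's GO bit has cleared, its
 * shadow registers have been taken into use, so the corresponding
 * shadow_dirty flags can be cleared. The handler then retries
 * configure_dispc() for anything still pending and unregisters itself when
 * all managers are idle and nothing is left to write.
 */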
static void dss_apply_irq_handler(void *data, u32 mask)
{
        struct manager_cache_data *mc;
        struct overlay_cache_data *oc;
        const int num_ovls = dss_feat_get_num_ovls();
        const int num_mgrs = dss_feat_get_num_mgrs();
        int i, r;
        bool mgr_busy[MAX_DSS_MANAGERS];
        u32 irq_mask;

        for (i = 0; i < num_mgrs; i++)
                mgr_busy[i] = dispc_mgr_go_busy(i);

        spin_lock(&dss_cache.lock);

        for (i = 0; i < num_ovls; ++i) {
                oc = &dss_cache.overlay_cache[i];
                if (!mgr_busy[oc->channel])
                        oc->shadow_dirty = false;
        }

        for (i = 0; i < num_mgrs; ++i) {
                mc = &dss_cache.manager_cache[i];
                if (!mgr_busy[i])
                        mc->shadow_dirty = false;
        }

        r = configure_dispc();
        if (r == 1)
                goto end;

        /* re-read busy flags */
        for (i = 0; i < num_mgrs; i++)
                mgr_busy[i] = dispc_mgr_go_busy(i);

        /* keep running as long as there are busy managers, so that
         * we can collect overlay-applied information */
        for (i = 0; i < num_mgrs; ++i) {
                if (mgr_busy[i])
                        goto end;
        }

        irq_mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_ODD |
                        DISPC_IRQ_EVSYNC_EVEN;
        if (dss_has_feature(FEAT_MGR_LCD2))
                irq_mask |= DISPC_IRQ_VSYNC2;

        omap_dispc_unregister_isr(dss_apply_irq_handler, NULL, irq_mask);
        dss_cache.irq_enabled = false;

end:
        spin_unlock(&dss_cache.lock);
}

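/*
 * Copy the user-visible overlay info into the overlay cache. The overlay is
 * cached as disabled if it is not enabled, has no manager or display, or if
 * dss_check_overlay() rejects the new configuration.
 */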
static int omap_dss_mgr_apply_ovl(struct omap_overlay *ovl)
{
        struct overlay_cache_data *oc;
        struct omap_dss_device *dssdev;

        oc = &dss_cache.overlay_cache[ovl->id];

        if (ovl->manager_changed) {
                ovl->manager_changed = false;
                ovl->info_dirty = true;
        }

        if (!overlay_enabled(ovl)) {
                if (oc->enabled) {
                        oc->enabled = false;
                        oc->dirty = true;
                }
                return 0;
        }

        if (!ovl->info_dirty)
                return 0;

        dssdev = ovl->manager->device;

        if (dss_check_overlay(ovl, dssdev)) {
                if (oc->enabled) {
                        oc->enabled = false;
                        oc->dirty = true;
                }
                return -EINVAL;
        }

        ovl->info_dirty = false;
        oc->dirty = true;
        oc->info = ovl->info;

        oc->channel = ovl->manager->id;

        oc->enabled = true;

        return 0;
}

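/* Copy the user-visible manager info into the manager cache. */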
static void omap_dss_mgr_apply_mgr(struct omap_overlay_manager *mgr)
{
        struct manager_cache_data *mc;

        mc = &dss_cache.manager_cache[mgr->id];

        if (mgr->device_changed) {
                mgr->device_changed = false;
                mgr->info_dirty = true;
        }

        if (!mgr->info_dirty)
                return;

        if (!mgr->device)
                return;

        mgr->info_dirty = false;
        mc->dirty = true;
        mc->info = mgr->info;

        mc->manual_update = mgr_manual_update(mgr);
}

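/*
 * Recalculate the cached FIFO low/high thresholds for an enabled overlay,
 * based on the plane's FIFO size, its burst size and the type of the
 * connected display (DSI uses its own threshold calculation).
 */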
static void omap_dss_mgr_apply_ovl_fifos(struct omap_overlay *ovl)
{
        struct overlay_cache_data *oc;
        struct omap_dss_device *dssdev;
        u32 size, burst_size;

        oc = &dss_cache.overlay_cache[ovl->id];

        if (!oc->enabled)
                return;

        dssdev = ovl->manager->device;

        size = dispc_ovl_get_fifo_size(ovl->id);

        burst_size = dispc_ovl_get_burst_size(ovl->id);

        switch (dssdev->type) {
        case OMAP_DISPLAY_TYPE_DPI:
        case OMAP_DISPLAY_TYPE_DBI:
        case OMAP_DISPLAY_TYPE_SDI:
        case OMAP_DISPLAY_TYPE_VENC:
        case OMAP_DISPLAY_TYPE_HDMI:
                default_get_overlay_fifo_thresholds(ovl->id, size,
                                burst_size, &oc->fifo_low,
                                &oc->fifo_high);
                break;
#ifdef CONFIG_OMAP2_DSS_DSI
        case OMAP_DISPLAY_TYPE_DSI:
                dsi_get_overlay_fifo_thresholds(ovl->id, size,
                                burst_size, &oc->fifo_low,
                                &oc->fifo_high);
                break;
#endif
        default:
                BUG();
        }
}

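/*
 * omap_dss_mgr_apply() is the main entry point for applying new settings:
 * it copies any dirty overlay and manager info into dss_cache under the
 * cache lock, registers the VSYNC/EVSYNC apply ISR if it is not already
 * running, and tries to write the cache to the hardware right away.
 */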
int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
{
        int i, r;
        unsigned long flags;

        DSSDBG("omap_dss_mgr_apply(%s)\n", mgr->name);

        r = dispc_runtime_get();
        if (r)
                return r;

        spin_lock_irqsave(&dss_cache.lock, flags);

        /* Configure overlays */
        for (i = 0; i < mgr->num_overlays; ++i) {
                struct omap_overlay *ovl;

                ovl = mgr->overlays[i];

                if (ovl->manager != mgr)
                        continue;

                omap_dss_mgr_apply_ovl(ovl);
        }

        /* Configure manager */
        omap_dss_mgr_apply_mgr(mgr);

        /* Configure overlay fifos */
        for (i = 0; i < mgr->num_overlays; ++i) {
                struct omap_overlay *ovl;

                ovl = mgr->overlays[i];

                if (ovl->manager != mgr)
                        continue;

                omap_dss_mgr_apply_ovl_fifos(ovl);
        }

        r = 0;
        if (!dss_cache.irq_enabled) {
                u32 mask;

                mask = DISPC_IRQ_VSYNC | DISPC_IRQ_EVSYNC_ODD |
                        DISPC_IRQ_EVSYNC_EVEN;
                if (dss_has_feature(FEAT_MGR_LCD2))
                        mask |= DISPC_IRQ_VSYNC2;

                r = omap_dispc_register_isr(dss_apply_irq_handler, NULL, mask);

                if (r)
                        DSSERR("failed to register apply isr\n");

                dss_cache.irq_enabled = true;
        }

        configure_dispc();

        spin_unlock_irqrestore(&dss_cache.lock, flags);

        dispc_runtime_put();

        return r;
}

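/*
 * dss_mgr_enable()/dss_mgr_disable() wrap dispc_mgr_enable() so that code
 * outside apply.c no longer toggles the manager enable bit directly; this is
 * what the "hide manager's enable/disable()" change is about.
 */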
void dss_mgr_enable(struct omap_overlay_manager *mgr)
{
        dispc_mgr_enable(mgr->id, true);
}

void dss_mgr_disable(struct omap_overlay_manager *mgr)
{
        dispc_mgr_enable(mgr->id, false);
}