drm/msm/mdp5: update irqs on crtc<->encoder link change
[deliverable/linux.git] / drivers / gpu / drm / msm / mdp / mdp_kms.c
1 /*
2 * Copyright (C) 2013 Red Hat
3 * Author: Rob Clark <robdclark@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18
19 #include "msm_drv.h"
20 #include "mdp_kms.h"
21
22
/* Helper type for mdp_irq_wait(): an on-stack mdp_irq whose callback
 * (wait_irq()) decrements 'count' and wakes the sleeping waiter.
 */
struct mdp_irq_wait {
	struct mdp_irq irq;	/* embedded irq; container_of() recovers us */
	int count;		/* irqs still awaited; initialized to 1 */
};
27
/* Waiters in mdp_irq_wait() sleep on this queue; wait_irq() wakes them. */
static DECLARE_WAIT_QUEUE_HEAD(wait_event);

/* Protects mdp_kms->irq_list, ->vblank_mask and ->in_irq.
 * NOTE(review): this is a single file-scope lock, shared by all mdp_kms
 * instances rather than per-device — presumably fine since there is one
 * MDP per SoC; verify if that assumption ever changes.
 */
static DEFINE_SPINLOCK(list_lock);
31
32 static void update_irq(struct mdp_kms *mdp_kms)
33 {
34 struct mdp_irq *irq;
35 uint32_t irqmask = mdp_kms->vblank_mask;
36
37 BUG_ON(!spin_is_locked(&list_lock));
38
39 list_for_each_entry(irq, &mdp_kms->irq_list, node)
40 irqmask |= irq->irqmask;
41
42 mdp_kms->funcs->set_irqmask(mdp_kms, irqmask);
43 }
44
45 /* if an mdp_irq's irqmask has changed, such as when mdp5 crtc<->encoder
46 * link changes, this must be called to figure out the new global irqmask
47 */
48 void mdp_irq_update(struct mdp_kms *mdp_kms)
49 {
50 unsigned long flags;
51 spin_lock_irqsave(&list_lock, flags);
52 update_irq(mdp_kms);
53 spin_unlock_irqrestore(&list_lock, flags);
54 }
55
/* Dispatch the irq status bits in 'status' to every registered handler
 * whose irqmask intersects them.
 *
 * The list lock is dropped around each callback so a handler may call
 * mdp_irq_register()/mdp_irq_unregister() without deadlocking; while we
 * are iterating, 'in_irq' makes register/unregister skip their own
 * mdp_irq_update() call, and a single update_irq() at the end picks up
 * any list changes the handlers made.
 *
 * NOTE(review): 'n' (the saved next entry) can go stale while the lock
 * is dropped if a handler unregisters an entry other than itself —
 * appears to rely on handlers only ever removing themselves; verify.
 */
void mdp_dispatch_irqs(struct mdp_kms *mdp_kms, uint32_t status)
{
	struct mdp_irq *handler, *n;
	unsigned long flags;

	spin_lock_irqsave(&list_lock, flags);
	mdp_kms->in_irq = true;
	list_for_each_entry_safe(handler, n, &mdp_kms->irq_list, node) {
		if (handler->irqmask & status) {
			/* drop the lock across the callback (see above): */
			spin_unlock_irqrestore(&list_lock, flags);
			handler->irq(handler, handler->irqmask & status);
			spin_lock_irqsave(&list_lock, flags);
		}
	}
	mdp_kms->in_irq = false;
	/* fold in any (un)registrations done by the handlers: */
	update_irq(mdp_kms);
	spin_unlock_irqrestore(&list_lock, flags);

}
75
76 void mdp_update_vblank_mask(struct mdp_kms *mdp_kms, uint32_t mask, bool enable)
77 {
78 unsigned long flags;
79
80 spin_lock_irqsave(&list_lock, flags);
81 if (enable)
82 mdp_kms->vblank_mask |= mask;
83 else
84 mdp_kms->vblank_mask &= ~mask;
85 update_irq(mdp_kms);
86 spin_unlock_irqrestore(&list_lock, flags);
87 }
88
89 static void wait_irq(struct mdp_irq *irq, uint32_t irqstatus)
90 {
91 struct mdp_irq_wait *wait =
92 container_of(irq, struct mdp_irq_wait, irq);
93 wait->count--;
94 wake_up_all(&wait_event);
95 }
96
97 void mdp_irq_wait(struct mdp_kms *mdp_kms, uint32_t irqmask)
98 {
99 struct mdp_irq_wait wait = {
100 .irq = {
101 .irq = wait_irq,
102 .irqmask = irqmask,
103 },
104 .count = 1,
105 };
106 mdp_irq_register(mdp_kms, &wait.irq);
107 wait_event_timeout(wait_event, (wait.count <= 0),
108 msecs_to_jiffies(100));
109 mdp_irq_unregister(mdp_kms, &wait.irq);
110 }
111
112 void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
113 {
114 unsigned long flags;
115 bool needs_update = false;
116
117 spin_lock_irqsave(&list_lock, flags);
118
119 if (!irq->registered) {
120 irq->registered = true;
121 list_add(&irq->node, &mdp_kms->irq_list);
122 needs_update = !mdp_kms->in_irq;
123 }
124
125 spin_unlock_irqrestore(&list_lock, flags);
126
127 if (needs_update)
128 mdp_irq_update(mdp_kms);
129 }
130
131 void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
132 {
133 unsigned long flags;
134 bool needs_update = false;
135
136 spin_lock_irqsave(&list_lock, flags);
137
138 if (irq->registered) {
139 irq->registered = false;
140 list_del(&irq->node);
141 needs_update = !mdp_kms->in_irq;
142 }
143
144 spin_unlock_irqrestore(&list_lock, flags);
145
146 if (needs_update)
147 mdp_irq_update(mdp_kms);
148 }
This page took 0.049872 seconds and 5 git commands to generate.