ARC: Fix coding style issues
arch/arc/kernel/irq.c
/*
 * Copyright (C) 2011-12 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/irqdomain.h>
#include <asm/sections.h>
#include <asm/irq.h>
#include <asm/mach_desc.h>

/*
 * Early hardware-specific interrupt setup
 * -Called very early (start_kernel -> setup_arch -> setup_processor)
 * -Platform independent (a must for any ARC700)
 * -Needed for each CPU (hence not foldable into init_IRQ)
 *
 * What it does:
 * -Sets up the Vector Table Base Reg, in case Linux is not linked at 0x8000_0000
 * -Disables all IRQs (on the CPU side)
 * -Optionally, sets up the high priority interrupts as Level 2 IRQs
 */
void __init arc_init_IRQ(void)
{
	int level_mask = 0;

	write_aux_reg(AUX_INTR_VEC_BASE, _int_vec_base_lds);

	/* Disable all IRQs: enable them as devices request */
	write_aux_reg(AUX_IENABLE, 0);

	/* setup any high priority Interrupts (Level2 in ARCompact jargon) */
#ifdef CONFIG_ARC_IRQ3_LV2
	level_mask |= (1 << 3);
#endif
#ifdef CONFIG_ARC_IRQ5_LV2
	level_mask |= (1 << 5);
#endif
#ifdef CONFIG_ARC_IRQ6_LV2
	level_mask |= (1 << 6);
#endif

	if (level_mask) {
		pr_info("Level-2 interrupts bitset %x\n", level_mask);
		write_aux_reg(AUX_IRQ_LEV, level_mask);
	}
}

/*
 * The ARC700 core includes a simple on-chip intc supporting
 * -per-IRQ enable/disable
 * -2 levels of interrupts (high/low)
 * -all interrupts being level triggered
 *
 * To reduce platform code, we assume all IRQs are directly hooked up to this
 * intc. Platforms with an external intc, and hence cascaded IRQs, are free to
 * override the setup below on a per-IRQ basis.
 */

static void arc_mask_irq(struct irq_data *data)
{
	arch_mask_irq(data->irq);
}

static void arc_unmask_irq(struct irq_data *data)
{
	arch_unmask_irq(data->irq);
}

static struct irq_chip onchip_intc = {
	.name		= "ARC In-core Intc",
	.irq_mask	= arc_mask_irq,
	.irq_unmask	= arc_unmask_irq,
};

static int arc_intc_domain_map(struct irq_domain *d, unsigned int irq,
			       irq_hw_number_t hw)
{
	if (irq == TIMER0_IRQ)
		irq_set_chip_and_handler(irq, &onchip_intc, handle_percpu_irq);
	else
		irq_set_chip_and_handler(irq, &onchip_intc, handle_level_irq);

	return 0;
}

static const struct irq_domain_ops arc_intc_domain_ops = {
	.xlate = irq_domain_xlate_onecell,
	.map = arc_intc_domain_map,
};
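
As the comment above notes, a platform with an external cascaded intc may override this per-IRQ setup. A minimal, hypothetical sketch, assuming hwirq 7 feeds the external controller and using the chained-handler API of this kernel vintage (needs <linux/irq.h>; all names below are illustrative, not part of this file):

/* Hypothetical cascade flow handler for an external intc behind hwirq 7 */
static void ext_intc_cascade(unsigned int irq, struct irq_desc *desc)
{
	/*
	 * Read the external controller's cause register here and forward
	 * each pending child interrupt via generic_handle_irq()
	 */
}

static void __init plat_cascade_init(void)
{
	irq_set_chained_handler(7, ext_intc_cascade);
}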

static struct irq_domain *root_domain;

void __init init_onchip_IRQ(void)
{
	struct device_node *intc = NULL;

	intc = of_find_compatible_node(NULL, NULL, "snps,arc700-intc");
	if (!intc)
		panic("DeviceTree Missing incore intc\n");

	root_domain = irq_domain_add_legacy(intc, NR_IRQS, 0, 0,
					    &arc_intc_domain_ops, NULL);

	if (!root_domain)
		panic("root irq domain not avail\n");

	/* with this we don't need to export root_domain */
	irq_set_default_host(root_domain);
}
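
Because the legacy domain above is registered with first_irq = 0, hwirq N of the on-chip intc maps 1:1 to Linux IRQ N, so a peripheral driver can request its line directly. A minimal, hypothetical sketch (the IRQ number, handler and device name are illustrative only; request_irq() comes from <linux/interrupt.h>, already included here):

/* Hypothetical peripheral handler; hwirq 5 is an arbitrary example line */
static irqreturn_t my_periph_isr(int irq, void *dev_id)
{
	/* acknowledge and service the device here */
	return IRQ_HANDLED;
}

static int __init my_periph_init(void)
{
	return request_irq(5, my_periph_isr, 0, "my-periph", NULL);
}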

/*
 * Late interrupt-system init, called from start_kernel for the boot CPU only
 *
 * Since slab must already be initialized, platforms can start doing any
 * needed request_irq()s
 */
void __init init_IRQ(void)
{
	init_onchip_IRQ();

	/* Any external intc can be set up here */
	if (machine_desc->init_irq)
		machine_desc->init_irq();

#ifdef CONFIG_SMP
	/* The master CPU can initialize its side of the IPIs */
	if (machine_desc->init_smp)
		machine_desc->init_smp(smp_processor_id());
#endif
}
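
For context, the init_irq/init_smp hooks above come from the platform's machine descriptor (asm/mach_desc.h, already included). A hypothetical platform registration might look like the sketch below, assuming the ARC MACHINE_START/MACHINE_END helpers of this era; the board name, compatible string and callbacks are illustrative:

static void __init my_board_init_irq(void)
{
	/* set up any external/cascaded intc here */
}

#ifdef CONFIG_SMP
static void my_board_init_smp(unsigned int cpu)
{
	/* wire up this CPU's side of the IPIs */
}
#endif

static const char *my_board_compat[] __initconst = {
	"snps,my-board",	/* illustrative compatible string */
	NULL,
};

MACHINE_START(MY_BOARD, "my-board")
	.dt_compat	= my_board_compat,
	.init_irq	= my_board_init_irq,
#ifdef CONFIG_SMP
	.init_smp	= my_board_init_smp,
#endif
MACHINE_END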

/*
 * "C" entry point for any ARC ISR, called from the low level vector handler
 * @irq is the vector number read from the ICAUSE reg of the on-chip intc
 */
void arch_do_IRQ(unsigned int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();
	generic_handle_irq(irq);
	irq_exit();
	set_irq_regs(old_regs);
}

int __init get_hw_config_num_irq(void)
{
	uint32_t val = read_aux_reg(ARC_REG_VECBASE_BCR);

	switch (val & 0x03) {
	case 0:
		return 16;
	case 1:
		return 32;
	case 2:
		return 8;
	default:
		return 0;
	}

	return 0;
}
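
The helper above decodes the low two bits of the VECBASE build-config register into the number of interrupt lines the core was built with (16, 32 or 8). A hypothetical boot-time check using it; the helper name and the comparison against NR_IRQS are illustrative only:

static void __init check_hw_irq_count(void)
{
	int hw_irqs = get_hw_config_num_irq();

	if (hw_irqs && hw_irqs < NR_IRQS)
		pr_warn("intc built with %d lines, kernel expects %d\n",
			hw_irqs, NR_IRQS);
}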

/*
 * arch_local_irq_enable - Enable interrupts.
 *
 * 1. Explicitly called to re-enable interrupts
 * 2. Implicitly called from spin_unlock_irq, write_unlock_irq etc,
 *    which may be in a hard ISR itself
 *
 * Semantics of this function change depending on where it is called from:
 *
 * -If called from a hard ISR, it must not invert interrupt priorities
 *  e.g. suppose TIMER is a high priority (Level 2) IRQ
 *  In the timer hard ISR, timer_interrupt( ) calls spin_unlock_irq several
 *  times. Here local_irq_enable( ) should not re-enable lower priority
 *  interrupts
 * -If called from a soft ISR, it must re-enable all interrupts
 *  soft ISRs are low priority jobs which can be very slow, thus all IRQs
 *  must be enabled while they run.
 *  Now hardware-context wise we may still be in the L2 ISR (rtie not done),
 *  yet we must re-enable both L1 and L2 IRQs
 *  Another twist is the previous scenario with the flow being
 *     L1 ISR ==> interrupted by L2 ISR ==> L2 soft ISR
 *  here we must not re-enable L1, as the previous L1 interrupt's h/w context
 *  will get over-written (this is a deficiency in the ARC700 interrupt
 *  mechanism)
 */

#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS	/* Complex version for 2 IRQ levels */

void arch_local_irq_enable(void)
{
	unsigned long flags;

	flags = arch_local_save_flags();

	/* Allow both L1 and L2 at the onset */
	flags |= (STATUS_E1_MASK | STATUS_E2_MASK);

	/* Called from hard ISR (between irq_enter and irq_exit) */
	if (in_irq()) {

		/* If in L2 ISR, don't re-enable any further IRQs as this can
		 * cause IRQ priorities to get upside down. e.g. it could allow
		 * L1 to be taken while in the L2 hard ISR, which is wrong not
		 * only in theory, it can also cause the dreaded L1-L2-L1
		 * scenario
		 */
		if (flags & STATUS_A2_MASK)
			flags &= ~(STATUS_E1_MASK | STATUS_E2_MASK);

		/* Even if in L1 ISR, allow higher prio L2 IRQs */
		else if (flags & STATUS_A1_MASK)
			flags &= ~(STATUS_E1_MASK);
	}

	/* called from soft IRQ, ideally we want to re-enable all levels */

	else if (in_softirq()) {

		/* However if this is the case of L1 interrupted by L2,
		 * re-enabling both may cause the whacko L1-L2-L1 scenario
		 * because ARC700 allows Level 1 to interrupt an active L2 ISR.
		 * Thus we disable both.
		 * However some code executing in a soft ISR wants some IRQs
		 * to be enabled, so we re-enable L2 only.
		 *
		 * How do we determine L1 interrupted by L2?
		 * -A2 is set (means in L2 ISR)
		 * -E1 is set in this ISR's pt_regs->status32, which is the
		 *  saved copy of status32_l2 when the L2 ISR happened
		 */
		struct pt_regs *pt = get_irq_regs();

		if ((flags & STATUS_A2_MASK) && pt &&
		    (pt->status32 & STATUS_A1_MASK)) {
			/*flags &= ~(STATUS_E1_MASK | STATUS_E2_MASK); */
			flags &= ~(STATUS_E1_MASK);
		}
	}

	arch_local_irq_restore(flags);
}

#else	/* !CONFIG_ARC_COMPACT_IRQ_LEVELS */

/*
 * Simpler version for only 1 level of interrupt
 * Here we only worry about the Level 1 bits
 */
void arch_local_irq_enable(void)
{
	unsigned long flags;

	/*
	 * The ARC IDE driver tries to re-enable interrupts from hard-isr
	 * context, which is simply wrong
	 */
	if (in_irq()) {
		WARN_ONCE(1, "IRQ enabled from hard-isr");
		return;
	}

	flags = arch_local_save_flags();
	flags |= (STATUS_E1_MASK | STATUS_E2_MASK);
	arch_local_irq_restore(flags);
}
#endif
EXPORT_SYMBOL(arch_local_irq_enable);
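
For context, these arch_local_irq_*() hooks sit underneath the generic local_irq_*() helpers that driver code actually uses. A trivial illustrative usage; the function and its critical section are hypothetical:

/* Hypothetical driver-side critical section (local_irq_* from <linux/irqflags.h>) */
static void poke_device_regs(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* disables IRQs via the arch hooks above */
	/* ... touch state that must not race with this CPU's ISRs ... */
	local_irq_restore(flags);	/* conditionally re-enables them */
}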