Commit | Line | Data |
---|---|---|
867e359b CM |
1 | /* |
2 | * Copyright 2010 Tilera Corporation. All Rights Reserved. | |
3 | * | |
4 | * This program is free software; you can redistribute it and/or | |
5 | * modify it under the terms of the GNU General Public License | |
6 | * as published by the Free Software Foundation, version 2. | |
7 | * | |
8 | * This program is distributed in the hope that it will be useful, but | |
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | |
10 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | |
11 | * NON INFRINGEMENT. See the GNU General Public License for | |
12 | * more details. | |
13 | */ | |
14 | ||
15 | #include <linux/module.h> | |
16 | #include <linux/seq_file.h> | |
17 | #include <linux/interrupt.h> | |
18 | #include <linux/irq.h> | |
19 | #include <linux/kernel_stat.h> | |
20 | #include <linux/uaccess.h> | |
21 | #include <hv/drv_pcie_rc_intf.h> | |
fb702b94 CM |
22 | #include <arch/spr_def.h> |
23 | #include <asm/traps.h> | |
8d61dd7d | 24 | #include <linux/perf_event.h> |
fb702b94 CM |
25 | |
/* Bit-flag stored in irq_desc->chip_data to indicate HW-cleared irqs. */
#define IS_HW_CLEARED 1

/*
 * The set of interrupts we enable for arch_local_irq_enable().
 * This is initialized to have just a single interrupt that the kernel
 * doesn't actually use as a sentinel.  During kernel init,
 * interrupts are added as the kernel gets prepared to support them.
 * NOTE: we could probably initialize them all statically up front.
 */
DEFINE_PER_CPU(unsigned long long, interrupts_enabled_mask) =
	INITIAL_INTERRUPTS_ENABLED;
EXPORT_PER_CPU_SYMBOL(interrupts_enabled_mask);

/* Define per-tile device interrupt statistics state. */
DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_internodealigned_in_smp;
EXPORT_PER_CPU_SYMBOL(irq_stat);

/*
 * Define per-tile irq disable mask; the hardware/HV only has a single
 * mask that we use to implement both masking and disabling.
 */
static DEFINE_PER_CPU(unsigned long, irq_disable_mask)
	____cacheline_internodealigned_in_smp;

/*
 * Per-tile IRQ nesting depth.  Used to make sure we enable newly
 * enabled IRQs before exiting the outermost interrupt.
 */
static DEFINE_PER_CPU(int, irq_depth);

/*
 * Abstract the two mechanisms for manipulating per-cpu device
 * interrupt state: direct SPR writes where the chip has IPI hardware
 * (TILE-Gx), or hypervisor calls otherwise (TILEPro).  Each takes a
 * bitmask of irq numbers.
 */
#if CHIP_HAS_IPI()
/* Use SPRs to manipulate device interrupts. */
#define mask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_SET_K, irq_mask)
#define unmask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_RESET_K, irq_mask)
#define clear_irqs(irq_mask) __insn_mtspr(SPR_IPI_EVENT_RESET_K, irq_mask)
#else
/* Use HV to manipulate device interrupts. */
#define mask_irqs(irq_mask) hv_disable_intr(irq_mask)
#define unmask_irqs(irq_mask) hv_enable_intr(irq_mask)
#define clear_irqs(irq_mask) hv_clear_intr(irq_mask)
#endif
867e359b CM |
68 | |
/*
 * The interrupt handling path, implemented in terms of HV interrupt
 * emulation on TILEPro, and IPI hardware on TILE-Gx.
 * Entered with interrupts disabled.
 *
 * @regs:   saved register state at the point of interruption.
 * @intnum: interrupt vector number (unused in the body here; presumably
 *          kept for the low-level entry ABI — confirm against caller).
 */
void tile_dev_intr(struct pt_regs *regs, int intnum)
{
	/* Bump the nesting depth; depth == 0 means we are the outermost. */
	int depth = __get_cpu_var(irq_depth)++;
	unsigned long original_irqs;
	unsigned long remaining_irqs;
	struct pt_regs *old_regs;

#if CHIP_HAS_IPI()
	/*
	 * Pending interrupts are listed in an SPR.  We might be
	 * nested, so be sure to only handle irqs that weren't already
	 * masked by a previous interrupt.  Then, mask out the ones
	 * we're going to handle.
	 */
	unsigned long masked = __insn_mfspr(SPR_IPI_MASK_K);
	original_irqs = __insn_mfspr(SPR_IPI_EVENT_K) & ~masked;
	__insn_mtspr(SPR_IPI_MASK_SET_K, original_irqs);
#else
	/*
	 * Hypervisor performs the equivalent of the Gx code above and
	 * then puts the pending interrupt mask into a system save reg
	 * for us to find.
	 */
	original_irqs = __insn_mfspr(SPR_SYSTEM_SAVE_K_3);
#endif
	remaining_irqs = original_irqs;

	/* Track time spent here in an interrupt context. */
	old_regs = set_irq_regs(regs);
	irq_enter();

#ifdef CONFIG_DEBUG_STACKOVERFLOW
	/* Debugging check for stack overflow: less than 1/8th stack free? */
	{
		long sp = stack_pointer - (long) current_thread_info();
		if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
			pr_emerg("tile_dev_intr: "
			       "stack overflow: %ld\n",
			       sp - sizeof(struct thread_info));
			dump_stack();
		}
	}
#endif
	/* Dispatch each pending irq, lowest bit first. */
	while (remaining_irqs) {
		unsigned long irq = __ffs(remaining_irqs);
		remaining_irqs &= ~(1UL << irq);

		/* Count device irqs; Linux IPIs are counted elsewhere. */
		if (irq != IRQ_RESCHEDULE)
			__get_cpu_var(irq_stat).irq_dev_intr_count++;

		generic_handle_irq(irq);
	}

	/*
	 * If we weren't nested, turn on all enabled interrupts,
	 * including any that were reenabled during interrupt
	 * handling.
	 */
	if (depth == 0)
		unmask_irqs(~__get_cpu_var(irq_disable_mask));

	__get_cpu_var(irq_depth)--;

	/*
	 * Track time spent against the current process again and
	 * process any softirqs if they are waiting.
	 */
	irq_exit();
	set_irq_regs(old_regs);
}
145 | ||
146 | ||
fb702b94 CM |
/*
 * Remove an irq from the disabled mask.  If we're in an interrupt
 * context, defer enabling the HW interrupt until we leave.
 *
 * get_cpu_var()/put_cpu_var() bracket the update so the read-modify-
 * write of this cpu's disable mask and the conditional unmask happen
 * on the same cpu.
 */
static void tile_irq_chip_enable(struct irq_data *d)
{
	get_cpu_var(irq_disable_mask) &= ~(1UL << d->irq);
	/* Only poke the HW now if no interrupt is currently in progress. */
	if (__get_cpu_var(irq_depth) == 0)
		unmask_irqs(1UL << d->irq);
	put_cpu_var(irq_disable_mask);
}
fb702b94 CM |
158 | |
/*
 * Add an irq to the disabled mask.  We disable the HW interrupt
 * immediately so that there's no possibility of it firing.  If we're
 * in an interrupt context, the return path is careful to avoid
 * unmasking a newly disabled interrupt.
 */
static void tile_irq_chip_disable(struct irq_data *d)
{
	get_cpu_var(irq_disable_mask) |= (1UL << d->irq);
	mask_irqs(1UL << d->irq);
	put_cpu_var(irq_disable_mask);
}
fb702b94 | 171 | |
867e359b | 172 | /* Mask an interrupt. */ |
f5b42c93 | 173 | static void tile_irq_chip_mask(struct irq_data *d) |
867e359b | 174 | { |
f5b42c93 | 175 | mask_irqs(1UL << d->irq); |
867e359b CM |
176 | } |
177 | ||
/* Unmask an interrupt (re-allow delivery without touching the disable mask). */
static void tile_irq_chip_unmask(struct irq_data *d)
{
	unmask_irqs(1UL << d->irq);
}
183 | ||
/*
 * Clear an interrupt before processing it so that any new assertions
 * will trigger another irq.  Interrupts flagged IS_HW_CLEARED in their
 * chip_data (see tile_irq_activate()) are cleared by hardware, so we
 * must not clear them again here.
 */
static void tile_irq_chip_ack(struct irq_data *d)
{
	if ((unsigned long)irq_data_get_irq_chip_data(d) != IS_HW_CLEARED)
		clear_irqs(1UL << d->irq);
}
193 | ||
/*
 * For per-cpu interrupts, we need to avoid unmasking any interrupts
 * that we disabled via disable_percpu_irq(): only unmask at end of
 * handling if the irq is not in this cpu's disable mask.
 */
static void tile_irq_chip_eoi(struct irq_data *d)
{
	if (!(__get_cpu_var(irq_disable_mask) & (1UL << d->irq)))
		unmask_irqs(1UL << d->irq);
}
203 | ||
fb702b94 | 204 | static struct irq_chip tile_irq_chip = { |
d1ea13c6 | 205 | .name = "tile_irq_chip", |
0c90547b CM |
206 | .irq_enable = tile_irq_chip_enable, |
207 | .irq_disable = tile_irq_chip_disable, | |
f5b42c93 TG |
208 | .irq_ack = tile_irq_chip_ack, |
209 | .irq_eoi = tile_irq_chip_eoi, | |
210 | .irq_mask = tile_irq_chip_mask, | |
211 | .irq_unmask = tile_irq_chip_unmask, | |
867e359b CM |
212 | }; |
213 | ||
/* Arch hook for early IRQ setup; just initializes the IPI subsystem. */
void __init init_IRQ(void)
{
	ipi_init();
}
218 | ||
/*
 * Per-cpu interrupt-delivery setup.  NOTE(review): presumably called
 * once per cpu during bringup — confirm against caller.
 */
void setup_irq_regs(void)
{
	/* Enable interrupt delivery. */
	unmask_irqs(~0UL);
#if CHIP_HAS_IPI()
	/* With IPI hardware, also unmask the kernel IPI interrupt line. */
	arch_local_irq_unmask(INT_IPI_K);
#endif
}
227 | ||
fb702b94 | 228 | void tile_irq_activate(unsigned int irq, int tile_irq_type) |
867e359b CM |
229 | { |
230 | /* | |
fb702b94 | 231 | * We use handle_level_irq() by default because the pending |
d7c96611 | 232 | * interrupt vector (whether modeled by the HV on |
fb702b94 CM |
233 | * TILEPro or implemented in hardware on TILE-Gx) has |
234 | * level-style semantics for each bit. An interrupt fires | |
235 | * whenever a bit is high, not just at edges. | |
236 | */ | |
237 | irq_flow_handler_t handle = handle_level_irq; | |
238 | if (tile_irq_type == TILE_IRQ_PERCPU) | |
239 | handle = handle_percpu_irq; | |
1919d641 | 240 | irq_set_chip_and_handler(irq, &tile_irq_chip, handle); |
fb702b94 CM |
241 | |
242 | /* | |
243 | * Flag interrupts that are hardware-cleared so that ack() | |
244 | * won't clear them. | |
867e359b | 245 | */ |
fb702b94 | 246 | if (tile_irq_type == TILE_IRQ_HW_CLEAR) |
1919d641 | 247 | irq_set_chip_data(irq, (void *)IS_HW_CLEARED); |
867e359b | 248 | } |
fb702b94 CM |
249 | EXPORT_SYMBOL(tile_irq_activate); |
250 | ||
867e359b CM |
251 | |
/* Called by the generic IRQ core when an unexpected vector fires. */
void ack_bad_irq(unsigned int irq)
{
	pr_err("unexpected IRQ trap at vector %02x\n", irq);
}
256 | ||
8d61dd7d ZL |
/*
 * /proc/interrupts printing: contribute a per-cpu "PMI" row of
 * perf-events interrupt counts when perf is configured; otherwise
 * print nothing.  @prec is the field width for the row label.
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
#ifdef CONFIG_PERF_EVENTS
	int i;

	seq_printf(p, "%*s: ", prec, "PMI");

	/* perf_irqs: per-cpu counter, presumably maintained by perf code. */
	for_each_online_cpu(i)
		seq_printf(p, "%10llu ", per_cpu(perf_irqs, i));
	seq_puts(p, " perf_events\n");
#endif
	return 0;
}
273 | ||
fb702b94 | 274 | #if CHIP_HAS_IPI() |
b26d851f | 275 | int arch_setup_hwirq(unsigned int irq, int node) |
6ef40512 | 276 | { |
b26d851f | 277 | return irq >= NR_IRQS ? -EINVAL : 0; |
6ef40512 TG |
278 | } |
279 | ||
b26d851f | 280 | void arch_teardown_hwirq(unsigned int irq) { } |
fb702b94 | 281 | #endif |