/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel_stat.h>
#include <linux/uaccess.h>
#include <hv/drv_pcie_rc_intf.h>
#include <arch/spr_def.h>
#include <asm/traps.h>
#include <linux/perf_event.h>

/* Bit-flag stored in irq_desc->chip_data to indicate HW-cleared irqs. */
#define IS_HW_CLEARED 1

/*
 * The set of interrupts we enable for arch_local_irq_enable().
 * This is initialized with just a single interrupt, a sentinel that
 * the kernel doesn't actually use.  During kernel init, interrupts
 * are added as the kernel gets prepared to support them.
 * NOTE: we could probably initialize them all statically up front.
 */
DEFINE_PER_CPU(unsigned long long, interrupts_enabled_mask) =
        INITIAL_INTERRUPTS_ENABLED;
EXPORT_PER_CPU_SYMBOL(interrupts_enabled_mask);

/* Define per-tile device interrupt statistics state. */
DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_internodealigned_in_smp;
EXPORT_PER_CPU_SYMBOL(irq_stat);

/*
 * Define the per-tile irq disable mask; the hardware/HV only has a
 * single mask that we use to implement both masking and disabling.
 */
static DEFINE_PER_CPU(unsigned long, irq_disable_mask)
        ____cacheline_internodealigned_in_smp;

/*
 * Per-tile IRQ nesting depth.  Used to make sure we unmask newly
 * enabled IRQs before exiting the outermost interrupt.
 */
static DEFINE_PER_CPU(int, irq_depth);

#if CHIP_HAS_IPI()
/* Use SPRs to manipulate device interrupts. */
#define mask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_SET_K, irq_mask)
#define unmask_irqs(irq_mask) __insn_mtspr(SPR_IPI_MASK_RESET_K, irq_mask)
#define clear_irqs(irq_mask) __insn_mtspr(SPR_IPI_EVENT_RESET_K, irq_mask)
#else
/* Use the HV to manipulate device interrupts. */
#define mask_irqs(irq_mask) hv_disable_intr(irq_mask)
#define unmask_irqs(irq_mask) hv_enable_intr(irq_mask)
#define clear_irqs(irq_mask) hv_clear_intr(irq_mask)
#endif
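
/*
 * The irq_mask arguments above are per-tile bitmasks with one bit
 * per interrupt number, so e.g. mask_irqs(1UL << irq) silences a
 * single irq on this tile.  The _K suffix on the SPR names selects
 * the registers for the kernel's protection level.
 */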

/*
 * The interrupt handling path, implemented in terms of HV interrupt
 * emulation on TILEPro, and IPI hardware on TILE-Gx.
 * Entered with interrupts disabled.
 */
void tile_dev_intr(struct pt_regs *regs, int intnum)
{
        int depth = __this_cpu_inc_return(irq_depth);
        unsigned long original_irqs;
        unsigned long remaining_irqs;
        struct pt_regs *old_regs;

#if CHIP_HAS_IPI()
        /*
         * Pending interrupts are listed in an SPR.  We might be
         * nested, so be sure to only handle irqs that weren't already
         * masked by a previous interrupt.  Then, mask out the ones
         * we're going to handle.
         */
        unsigned long masked = __insn_mfspr(SPR_IPI_MASK_K);
        original_irqs = __insn_mfspr(SPR_IPI_EVENT_K) & ~masked;
        __insn_mtspr(SPR_IPI_MASK_SET_K, original_irqs);
#else
        /*
         * The hypervisor performs the equivalent of the Gx code above
         * and then puts the pending interrupt mask into a system save
         * register for us to find.
         */
        original_irqs = __insn_mfspr(SPR_SYSTEM_SAVE_K_3);
#endif
        remaining_irqs = original_irqs;

        /* Track time spent here in an interrupt context. */
        old_regs = set_irq_regs(regs);
        irq_enter();

#ifdef CONFIG_DEBUG_STACKOVERFLOW
        /* Debugging check for stack overflow: less than 1/8th stack free? */
        {
                long sp = stack_pointer - (long) current_thread_info();
                if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
                        pr_emerg("%s: stack overflow: %ld\n",
                                 __func__, sp - sizeof(struct thread_info));
                        dump_stack();
                }
        }
#endif
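        /*
         * Dispatch each pending irq from the lowest set bit upward.
         * For example (illustrative values), if the pending mask is
         * 0x14, __ffs() yields 2 first, so we clear bit 2 and handle
         * irq 2, then come back around for irq 4.
         */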
        while (remaining_irqs) {
                unsigned long irq = __ffs(remaining_irqs);
                remaining_irqs &= ~(1UL << irq);

                /* Count device irqs; Linux IPIs are counted elsewhere. */
                if (irq != IRQ_RESCHEDULE)
                        __this_cpu_inc(irq_stat.irq_dev_intr_count);

                generic_handle_irq(irq);
        }

        /*
         * If we weren't nested, turn on all enabled interrupts,
         * including any that were re-enabled during interrupt
         * handling.
         */
        if (depth == 1)
                unmask_irqs(~__this_cpu_read(irq_disable_mask));

        __this_cpu_dec(irq_depth);

        /*
         * Track time spent against the current process again and
         * process any softirqs if they are waiting.
         */
        irq_exit();
        set_irq_regs(old_regs);
}


/*
 * Remove an irq from the disabled mask.  If we're in an interrupt
 * context, defer enabling the HW interrupt until we leave.
 */
static void tile_irq_chip_enable(struct irq_data *d)
{
        get_cpu_var(irq_disable_mask) &= ~(1UL << d->irq);
        if (__this_cpu_read(irq_depth) == 0)
                unmask_irqs(1UL << d->irq);
        put_cpu_var(irq_disable_mask);
}

/*
 * Add an irq to the disabled mask.  We disable the HW interrupt
 * immediately so that there's no possibility of it firing.  If we're
 * in an interrupt context, the return path is careful to avoid
 * unmasking a newly disabled interrupt.
 */
static void tile_irq_chip_disable(struct irq_data *d)
{
        get_cpu_var(irq_disable_mask) |= (1UL << d->irq);
        mask_irqs(1UL << d->irq);
        put_cpu_var(irq_disable_mask);
}

/* Mask an interrupt. */
static void tile_irq_chip_mask(struct irq_data *d)
{
        mask_irqs(1UL << d->irq);
}

/* Unmask an interrupt. */
static void tile_irq_chip_unmask(struct irq_data *d)
{
        unmask_irqs(1UL << d->irq);
}

/*
 * Clear an interrupt before processing it so that any new assertions
 * will trigger another irq.
 */
static void tile_irq_chip_ack(struct irq_data *d)
{
        if ((unsigned long)irq_data_get_irq_chip_data(d) != IS_HW_CLEARED)
                clear_irqs(1UL << d->irq);
}

/*
 * For per-cpu interrupts, we need to avoid unmasking any interrupts
 * that we disabled via disable_percpu_irq().
 */
static void tile_irq_chip_eoi(struct irq_data *d)
{
        if (!(__this_cpu_read(irq_disable_mask) & (1UL << d->irq)))
                unmask_irqs(1UL << d->irq);
}

static struct irq_chip tile_irq_chip = {
        .name = "tile_irq_chip",
        .irq_enable = tile_irq_chip_enable,
        .irq_disable = tile_irq_chip_disable,
        .irq_ack = tile_irq_chip_ack,
        .irq_eoi = tile_irq_chip_eoi,
        .irq_mask = tile_irq_chip_mask,
        .irq_unmask = tile_irq_chip_unmask,
};
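
/*
 * With this chip, handle_level_irq() masks and acks the irq around
 * each handler invocation and unmasks it afterwards if it hasn't
 * been disabled meanwhile, while handle_percpu_irq() (used for
 * TILE_IRQ_PERCPU below) just acks before the handler and eois
 * after it, without the mask/unmask round trip.
 */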

void __init init_IRQ(void)
{
        ipi_init();
}

void setup_irq_regs(void)
{
        /* Enable interrupt delivery. */
        unmask_irqs(~0UL);
#if CHIP_HAS_IPI()
        arch_local_irq_unmask(INT_IPI_K);
#endif
}

void tile_irq_activate(unsigned int irq, int tile_irq_type)
{
        /*
         * We use handle_level_irq() by default because the pending
         * interrupt vector (whether modeled by the HV on TILEPro or
         * implemented in hardware on TILE-Gx) has level-style
         * semantics for each bit.  An interrupt fires whenever a bit
         * is high, not just at edges.
         */
        irq_flow_handler_t handle = handle_level_irq;
        if (tile_irq_type == TILE_IRQ_PERCPU)
                handle = handle_percpu_irq;
        irq_set_chip_and_handler(irq, &tile_irq_chip, handle);

        /*
         * Flag interrupts that are hardware-cleared so that ack()
         * won't clear them.
         */
        if (tile_irq_type == TILE_IRQ_HW_CLEAR)
                irq_set_chip_data(irq, (void *)IS_HW_CLEARED);
}
EXPORT_SYMBOL(tile_irq_activate);
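
/*
 * A typical caller activates the irq with the desired semantics and
 * then registers a handler through the generic irq layer; a sketch,
 * with a hypothetical handler and device name:
 *
 *      tile_irq_activate(irq, TILE_IRQ_HW_CLEAR);
 *      if (request_irq(irq, my_handler, 0, "my_dev", NULL))
 *              pr_err("my_dev: request_irq failed\n");
 */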


void ack_bad_irq(unsigned int irq)
{
        pr_err("unexpected IRQ trap at vector %02x\n", irq);
}

/*
 * /proc/interrupts printing:
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
#ifdef CONFIG_PERF_EVENTS
        int i;

        seq_printf(p, "%*s: ", prec, "PMI");

        for_each_online_cpu(i)
                seq_printf(p, "%10llu ", per_cpu(perf_irqs, i));
        seq_puts(p, " perf_events\n");
#endif
        return 0;
}
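
/*
 * With CONFIG_PERF_EVENTS enabled, this contributes one line to
 * /proc/interrupts, e.g. (illustrative counts for two cpus):
 *
 *      PMI:       1234       5678   perf_events
 */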

#if CHIP_HAS_IPI()
int arch_setup_hwirq(unsigned int irq, int node)
{
        return irq >= NR_IRQS ? -EINVAL : 0;
}

void arch_teardown_hwirq(unsigned int irq) { }
#endif