/*
 * Copyright 2011 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/spinlock.h>
#include <linux/module.h>

#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xics.h>
#include <asm/kvm_ppc.h>
#include <asm/dbell.h>

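/*
 * Per-CPU Interrupt Presentation (ICP) MMIO layout: the XIRR poll
 * register is at offset 0, the XIRR proper at offset 4 (its
 * most-significant byte is the CPPR), and the QIRR/MFRR byte used
 * for IPIs at offset 12.
 */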
struct icp_ipl {
        union {
                u32 word;
                u8 bytes[4];
        } xirr_poll;
        union {
                u32 word;
                u8 bytes[4];
        } xirr;
        u32 dummy;
        union {
                u32 word;
                u8 bytes[4];
        } qirr;
        u32 link_a;
        u32 link_b;
        u32 link_c;
};

static struct icp_ipl __iomem *icp_native_regs[NR_CPUS];

static inline unsigned int icp_native_get_xirr(void)
{
        int cpu = smp_processor_id();
        unsigned int xirr;

        /* Handle an interrupt latched by KVM */
        xirr = kvmppc_get_xics_latch();
        if (xirr)
                return xirr;

        return in_be32(&icp_native_regs[cpu]->xirr.word);
}

static inline void icp_native_set_xirr(unsigned int value)
{
        int cpu = smp_processor_id();

        out_be32(&icp_native_regs[cpu]->xirr.word, value);
}

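/*
 * The CPPR is the most-significant byte of the XIRR word, so a single
 * byte store at offset 0 changes the processor priority without
 * signalling an EOI.
 */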
static inline void icp_native_set_cppr(u8 value)
{
        int cpu = smp_processor_id();

        out_8(&icp_native_regs[cpu]->xirr.bytes[0], value);
}

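/*
 * The QIRR (MFRR) holds the priority of a queued IPI. Writing a
 * priority more favored than the target CPU's CPPR presents an IPI;
 * writing 0xff (least favored) clears any pending IPI.
 */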
static inline void icp_native_set_qirr(int n_cpu, u8 value)
{
        out_8(&icp_native_regs[n_cpu]->qirr.bytes[0], value);
}

static void icp_native_set_cpu_priority(unsigned char cppr)
{
        xics_set_base_cppr(cppr);
        icp_native_set_cppr(cppr);
        iosync();
}

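/*
 * EOI is done by storing the full XIRR word: the top byte restores
 * the CPPR that was in effect before this interrupt (popped from the
 * per-CPU CPPR stack) and the low 24 bits name the source being EOIed.
 */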
void icp_native_eoi(struct irq_data *d)
{
        unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);

        iosync();
        icp_native_set_xirr((xics_pop_cppr() << 24) | hw_irq);
}

static void icp_native_teardown_cpu(void)
{
        int cpu = smp_processor_id();

        /* Clear any pending IPI */
        icp_native_set_qirr(cpu, 0xff);
}

static void icp_native_flush_ipi(void)
{
        /* We take the IPI irq but never return, so we need to EOI the
         * IPI while leaving our priority 0.
         *
         * Should we check all the other interrupts too?
         * Should we be flagging the idle loop instead?
         * Or creating some task to be scheduled?
         */

        icp_native_set_xirr((0x00 << 24) | XICS_IPI);
}

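/*
 * Reading the XIRR acknowledges the most favored pending interrupt;
 * the low 24 bits carry the source number. xics_push_cppr() remembers
 * that source's priority so icp_native_eoi() can restore the previous
 * priority later.
 */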
static unsigned int icp_native_get_irq(void)
{
        unsigned int xirr = icp_native_get_xirr();
        unsigned int vec = xirr & 0x00ffffff;
        unsigned int irq;

        if (vec == XICS_IRQ_SPURIOUS)
                return NO_IRQ;

        irq = irq_find_mapping(xics_host, vec);
        if (likely(irq != NO_IRQ)) {
                xics_push_cppr(vec);
                return irq;
        }

        /* We don't have a linux mapping, so have rtas mask it. */
        xics_mask_unknown_vec(vec);

        /* We might learn about it later, so EOI it */
        icp_native_set_xirr(xirr);

        return NO_IRQ;
}

#ifdef CONFIG_SMP

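/*
 * Prefer a doorbell when the target is a thread on the same core (a
 * doorbell is cheaper than an MMIO store to the XICS); otherwise raise
 * the IPI by writing IPI_PRIORITY to the target CPU's QIRR.
 */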
static void icp_native_cause_ipi(int cpu, unsigned long data)
{
        kvmppc_set_host_ipi(cpu, 1);
#ifdef CONFIG_PPC_DOORBELL
        if (cpu_has_feature(CPU_FTR_DBELL)) {
                if (cpumask_test_cpu(cpu, cpu_sibling_mask(get_cpu()))) {
                        doorbell_cause_ipi(cpu, data);
                        put_cpu();
                        return;
                }
                put_cpu();
        }
#endif
        icp_native_set_qirr(cpu, IPI_PRIORITY);
}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
void icp_native_cause_ipi_rm(int cpu)
{
        /*
         * Currently not used to send IPIs to another CPU
         * on the same core. Only caller is KVM real mode.
         * Need the physical address of the XICS to be
         * previously saved in kvm_hstate in the paca.
         */
        unsigned long xics_phys;

        /*
         * Just like the cause_ipi functions, it is required to
         * include a full barrier (out_rm8 includes a sync) before
         * causing the IPI.
         */
        xics_phys = paca[cpu].kvm_hstate.xics_phys;
        out_rm8((u8 *)(xics_phys + XICS_MFRR), IPI_PRIORITY);
}
#endif

/*
 * Called when an interrupt is received on an off-line CPU to
 * clear the interrupt, so that the CPU can go back to nap mode.
 */
void icp_native_flush_interrupt(void)
{
        unsigned int xirr = icp_native_get_xirr();
        unsigned int vec = xirr & 0x00ffffff;

        if (vec == XICS_IRQ_SPURIOUS)
                return;
        if (vec == XICS_IPI) {
                /* Clear pending IPI */
                int cpu = smp_processor_id();
                kvmppc_set_host_ipi(cpu, 0);
                icp_native_set_qirr(cpu, 0xff);
        } else {
                pr_err("XICS: hw interrupt 0x%x to offline cpu, disabling\n",
                       vec);
                xics_mask_unknown_vec(vec);
        }
        /* EOI the interrupt */
        icp_native_set_xirr(xirr);
}

void xics_wake_cpu(int cpu)
{
        icp_native_set_qirr(cpu, IPI_PRIORITY);
}
EXPORT_SYMBOL_GPL(xics_wake_cpu);

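/*
 * IPI handler: clear the host_ipi flag and the pending QIRR entry,
 * then let smp_ipi_demux() dispatch whatever IPI messages are queued.
 */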
static irqreturn_t icp_native_ipi_action(int irq, void *dev_id)
{
        int cpu = smp_processor_id();

        kvmppc_set_host_ipi(cpu, 0);
        icp_native_set_qirr(cpu, 0xff);

        return smp_ipi_demux();
}

#endif /* CONFIG_SMP */

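/*
 * Find the Linux CPU whose hard SMP id matches this interrupt server,
 * reserve and ioremap its ICP MMIO region, and record the physical
 * address so KVM's real-mode code can reach the XICS directly.
 */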
static int __init icp_native_map_one_cpu(int hw_id, unsigned long addr,
                                         unsigned long size)
{
        char *rname;
        int i, cpu = -1;

        /* This may look gross but it's good enough for now, we don't quite
         * have a hard -> linux processor id mapping.
         */
        for_each_possible_cpu(i) {
                if (!cpu_present(i))
                        continue;
                if (hw_id == get_hard_smp_processor_id(i)) {
                        cpu = i;
                        break;
                }
        }

        /* No match, skip that CPU. Don't print anything, it's normal: some
         * XICS come up with way more entries in there than you have CPUs.
         */
        if (cpu == -1)
                return 0;

        rname = kasprintf(GFP_KERNEL, "CPU %d [0x%x] Interrupt Presentation",
                          cpu, hw_id);

        if (!request_mem_region(addr, size, rname)) {
                pr_warning("icp_native: Could not reserve ICP MMIO"
                           " for CPU %d, interrupt server #0x%x\n",
                           cpu, hw_id);
                return -EBUSY;
        }

        icp_native_regs[cpu] = ioremap(addr, size);
        kvmppc_set_xics_phys(cpu, addr);
        if (!icp_native_regs[cpu]) {
                pr_warning("icp_native: Failed ioremap for CPU %d, "
                           "interrupt server #0x%x, addr %#lx\n",
                           cpu, hw_id, addr);
                release_mem_region(addr, size);
                return -ENOMEM;
        }
        return 0;
}

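/*
 * Parse one presentation-controller node: "ibm,interrupt-server-ranges"
 * gives the first interrupt server number and the server count, and each
 * "reg" entry describes the ICP MMIO region of one server.
 */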
static int __init icp_native_init_one_node(struct device_node *np,
                                           unsigned int *indx)
{
        unsigned int ilen;
        const __be32 *ireg;
        int i;
        int reg_tuple_size;
        int num_servers = 0;

        /* This code makes the theoretically broken assumption that the
         * interrupt server numbers are the same as the hard CPU numbers.
         * This happens to be the case so far but we are playing with fire...
         * should be fixed one of these days. -BenH.
         */
        ireg = of_get_property(np, "ibm,interrupt-server-ranges", &ilen);

        /* Does that ever happen? We'll know soon enough... but even good old
         * f80 does have that property.
         */
        WARN_ON((ireg == NULL) || (ilen != 2*sizeof(u32)));

        if (ireg) {
                *indx = of_read_number(ireg, 1);
                if (ilen >= 2*sizeof(u32))
                        num_servers = of_read_number(ireg + 1, 1);
        }

        ireg = of_get_property(np, "reg", &ilen);
        if (!ireg) {
                pr_err("icp_native: Can't find interrupt reg property");
                return -1;
        }

        reg_tuple_size = (of_n_addr_cells(np) + of_n_size_cells(np)) * 4;
        if (((ilen % reg_tuple_size) != 0)
            || (num_servers && (num_servers != (ilen / reg_tuple_size)))) {
                pr_err("icp_native: ICP reg len (%d) != num servers (%d)",
                       ilen / reg_tuple_size, num_servers);
                return -1;
        }

        for (i = 0; i < (ilen / reg_tuple_size); i++) {
                struct resource r;
                int err;

                err = of_address_to_resource(np, i, &r);
                if (err) {
                        pr_err("icp_native: Could not translate ICP MMIO"
                               " for interrupt server 0x%x (%d)\n", *indx, err);
                        return -1;
                }

                if (icp_native_map_one_cpu(*indx, r.start, resource_size(&r)))
                        return -1;

                (*indx)++;
        }
        return 0;
}

static const struct icp_ops icp_native_ops = {
        .get_irq = icp_native_get_irq,
        .eoi = icp_native_eoi,
        .set_priority = icp_native_set_cpu_priority,
        .teardown_cpu = icp_native_teardown_cpu,
        .flush_ipi = icp_native_flush_ipi,
#ifdef CONFIG_SMP
        .ipi_action = icp_native_ipi_action,
        .cause_ipi = icp_native_cause_ipi,
#endif
};

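/*
 * Probe the device tree for native ICP nodes: try "ibm,ppc-xicp" first,
 * then fall back to the older "PowerPC-External-Interrupt-Presentation"
 * node type, and install the native ICP ops if anything was found.
 */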
int __init icp_native_init(void)
{
        struct device_node *np;
        u32 indx = 0;
        int found = 0;

        for_each_compatible_node(np, NULL, "ibm,ppc-xicp")
                if (icp_native_init_one_node(np, &indx) == 0)
                        found = 1;
        if (!found) {
                for_each_node_by_type(np,
                        "PowerPC-External-Interrupt-Presentation") {
                        if (icp_native_init_one_node(np, &indx) == 0)
                                found = 1;
                }
        }

        if (found == 0)
                return -ENODEV;

        icp_ops = &icp_native_ops;

        return 0;
}