/******************************************************************************
 * arch/ia64/xen/irq_xen.c
 *
 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

#include <linux/cpu.h>

#include <xen/interface/xen.h>
#include <xen/interface/callback.h>
#include <xen/events.h>

#include <asm/xen/privop.h>

#include "irq_xen.h"

/***************************************************************************
 * pv_irq_ops
 * irq operations
 */

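/*
 * Ask the hypervisor to allocate a vector for this irq via
 * PHYSDEVOP_alloc_irq_vector.  Returns the vector on success,
 * -ENOSPC otherwise.
 */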
static int
xen_assign_irq_vector(int irq)
{
	struct physdev_irq irq_op;

	irq_op.irq = irq;
	if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op))
		return -ENOSPC;

	return irq_op.vector;
}

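/*
 * Hand a device vector allocated by xen_assign_irq_vector() back to the
 * hypervisor.  Vectors outside the device range are ignored.
 */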
static void
xen_free_irq_vector(int vector)
{
	struct physdev_irq irq_op;

	if (vector < IA64_FIRST_DEVICE_VECTOR ||
	    vector > IA64_LAST_DEVICE_VECTOR)
		return;

	irq_op.vector = vector;
	if (HYPERVISOR_physdev_op(PHYSDEVOP_free_irq_vector, &irq_op))
		printk(KERN_WARNING "%s: xen_free_irq_vector fail vector=%d\n",
		       __func__, vector);
}


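/*
 * Per-cpu irq numbers (-1 while unbound) and the names handed to the
 * event-channel binding functions below.
 */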
static DEFINE_PER_CPU(int, xen_timer_irq) = -1;
static DEFINE_PER_CPU(int, xen_ipi_irq) = -1;
static DEFINE_PER_CPU(int, xen_resched_irq) = -1;
static DEFINE_PER_CPU(int, xen_cmc_irq) = -1;
static DEFINE_PER_CPU(int, xen_cmcp_irq) = -1;
static DEFINE_PER_CPU(int, xen_cpep_irq) = -1;
#define NAME_SIZE 15
static DEFINE_PER_CPU(char[NAME_SIZE], xen_timer_name);
static DEFINE_PER_CPU(char[NAME_SIZE], xen_ipi_name);
static DEFINE_PER_CPU(char[NAME_SIZE], xen_resched_name);
static DEFINE_PER_CPU(char[NAME_SIZE], xen_cmc_name);
static DEFINE_PER_CPU(char[NAME_SIZE], xen_cmcp_name);
static DEFINE_PER_CPU(char[NAME_SIZE], xen_cpep_name);
#undef NAME_SIZE

struct saved_irq {
	unsigned int irq;
	struct irqaction *action;
};
/* 16 is a generously optimistic value, since only a handful of percpu
 * irqs are registered this early.
 */
#define MAX_LATE_IRQ 16
static struct saved_irq saved_percpu_irqs[MAX_LATE_IRQ];
static unsigned short late_irq_cnt;
static unsigned short saved_irq_cnt;
static int xen_slab_ready;

#ifdef CONFIG_SMP
#include <linux/sched.h>

/* Dummy stub.  Although we could check for XEN_RESCHEDULE_VECTOR before
 * __do_IRQ, doing so would issue several memory accesses to percpu data
 * and thus add unnecessary traffic to other paths.
 */
static irqreturn_t
xen_dummy_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

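/* The reschedule IPI only needs to enter the scheduler_ipi() path. */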
static irqreturn_t
xen_resched_handler(int irq, void *dev_id)
{
	scheduler_ipi();
	return IRQ_HANDLED;
}

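/* irqactions registered for the SMP IPI vectors in xen_register_ipi(). */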
static struct irqaction xen_ipi_irqaction = {
	.handler = handle_IPI,
	.flags = IRQF_DISABLED,
	.name = "IPI"
};

static struct irqaction xen_resched_irqaction = {
	.handler = xen_resched_handler,
	.flags = IRQF_DISABLED,
	.name = "resched"
};

static struct irqaction xen_tlb_irqaction = {
	.handler = xen_dummy_handler,
	.flags = IRQF_DISABLED,
	.name = "tlb_flush"
};
#endif

/*
 * This is the xen version of percpu irq registration, which needs to bind
 * to the xen-specific evtchn sub-system.  One catch is that the xen evtchn
 * binding interface depends on kmalloc, because the associated port must
 * be freed on device/cpu down.  So we cache registrations made on the BSP
 * before slab is ready and deal with them later.  Registrations that
 * happen after slab is ready are hooked up to a xen evtchn immediately.
 *
 * FIXME: MCA is not supported so far, and thus the "nomca" boot param is
 * required.
 */
static void
__xen_register_percpu_irq(unsigned int cpu, unsigned int vec,
			  struct irqaction *action, int save)
{
	int irq = 0;

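	/*
	 * Once slab is ready we can bind immediately: map the ia64 percpu
	 * vector onto the corresponding xen virq or ipi and hook it to an
	 * event channel.
	 */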
	if (xen_slab_ready) {
		switch (vec) {
		case IA64_TIMER_VECTOR:
			snprintf(per_cpu(xen_timer_name, cpu),
				 sizeof(per_cpu(xen_timer_name, cpu)),
				 "%s%d", action->name, cpu);
			irq = bind_virq_to_irqhandler(VIRQ_ITC, cpu,
				action->handler, action->flags,
				per_cpu(xen_timer_name, cpu), action->dev_id);
			per_cpu(xen_timer_irq, cpu) = irq;
			break;
		case IA64_IPI_RESCHEDULE:
			snprintf(per_cpu(xen_resched_name, cpu),
				 sizeof(per_cpu(xen_resched_name, cpu)),
				 "%s%d", action->name, cpu);
			irq = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, cpu,
				action->handler, action->flags,
				per_cpu(xen_resched_name, cpu), action->dev_id);
			per_cpu(xen_resched_irq, cpu) = irq;
			break;
		case IA64_IPI_VECTOR:
			snprintf(per_cpu(xen_ipi_name, cpu),
				 sizeof(per_cpu(xen_ipi_name, cpu)),
				 "%s%d", action->name, cpu);
			irq = bind_ipi_to_irqhandler(XEN_IPI_VECTOR, cpu,
				action->handler, action->flags,
				per_cpu(xen_ipi_name, cpu), action->dev_id);
			per_cpu(xen_ipi_irq, cpu) = irq;
			break;
		case IA64_CMC_VECTOR:
			snprintf(per_cpu(xen_cmc_name, cpu),
				 sizeof(per_cpu(xen_cmc_name, cpu)),
				 "%s%d", action->name, cpu);
			irq = bind_virq_to_irqhandler(VIRQ_MCA_CMC, cpu,
						      action->handler,
						      action->flags,
						      per_cpu(xen_cmc_name, cpu),
						      action->dev_id);
			per_cpu(xen_cmc_irq, cpu) = irq;
			break;
		case IA64_CMCP_VECTOR:
			snprintf(per_cpu(xen_cmcp_name, cpu),
				 sizeof(per_cpu(xen_cmcp_name, cpu)),
				 "%s%d", action->name, cpu);
			irq = bind_ipi_to_irqhandler(XEN_CMCP_VECTOR, cpu,
						     action->handler,
						     action->flags,
						     per_cpu(xen_cmcp_name, cpu),
						     action->dev_id);
			per_cpu(xen_cmcp_irq, cpu) = irq;
			break;
		case IA64_CPEP_VECTOR:
			snprintf(per_cpu(xen_cpep_name, cpu),
				 sizeof(per_cpu(xen_cpep_name, cpu)),
				 "%s%d", action->name, cpu);
			irq = bind_ipi_to_irqhandler(XEN_CPEP_VECTOR, cpu,
						     action->handler,
						     action->flags,
						     per_cpu(xen_cpep_name, cpu),
						     action->dev_id);
			per_cpu(xen_cpep_irq, cpu) = irq;
			break;
		case IA64_CPE_VECTOR:
		case IA64_MCA_RENDEZ_VECTOR:
		case IA64_PERFMON_VECTOR:
		case IA64_MCA_WAKEUP_VECTOR:
		case IA64_SPURIOUS_INT_VECTOR:
			/* No need to complain, these aren't supported. */
			break;
		default:
			printk(KERN_WARNING "Percpu irq %d is unsupported "
			       "by xen!\n", vec);
			break;
		}
		BUG_ON(irq < 0);

		if (irq > 0) {
			/*
			 * Mark percpu.  Without this, migrate_irqs() will
			 * mark the interrupt for migrations and trigger it
			 * on cpu hotplug.
			 */
			irq_set_status_flags(irq, IRQ_PER_CPU);
		}
	}

	/* For the BSP we cache the registered percpu irqs, and then re-walk
	 * them when initializing the APs.
	 */
	if (!cpu && save) {
		BUG_ON(saved_irq_cnt == MAX_LATE_IRQ);
		saved_percpu_irqs[saved_irq_cnt].irq = vec;
		saved_percpu_irqs[saved_irq_cnt].action = action;
		saved_irq_cnt++;
		if (!xen_slab_ready)
			late_irq_cnt++;
	}
}

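/*
 * pv_irq_ops hook: register a percpu irq on the current cpu and remember
 * it so it can be replayed when the APs come up.
 */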
static void
xen_register_percpu_irq(ia64_vector vec, struct irqaction *action)
{
	__xen_register_percpu_irq(smp_processor_id(), vec, action, 1);
}

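/*
 * Runs via late_time_init, once slab is usable: bind the percpu irqs that
 * were cached on the BSP to their event channels.
 */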
static void
xen_bind_early_percpu_irq(void)
{
	int i;

	xen_slab_ready = 1;
	/* There is no race when accessing this cached array, since only the
	 * BSP goes through this step, and it does so shortly after boot.
	 */
	for (i = 0; i < late_irq_cnt; i++)
		__xen_register_percpu_irq(smp_processor_id(),
					  saved_percpu_irqs[i].irq,
					  saved_percpu_irqs[i].action, 0);
}

/* FIXME: There's no obvious hook point for checking whether slab is ready,
 * so we abuse a late time hook for this instead.
 */

#ifdef CONFIG_HOTPLUG_CPU
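/*
 * CPU hotplug notifier: on CPU_DEAD, tear down every event-channel binding
 * that was set up for the departing cpu.
 */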
static int __devinit
unbind_evtchn_callback(struct notifier_block *nfb,
		       unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	if (action == CPU_DEAD) {
		/* Unregister evtchn. */
		if (per_cpu(xen_cpep_irq, cpu) >= 0) {
			unbind_from_irqhandler(per_cpu(xen_cpep_irq, cpu),
					       NULL);
			per_cpu(xen_cpep_irq, cpu) = -1;
		}
		if (per_cpu(xen_cmcp_irq, cpu) >= 0) {
			unbind_from_irqhandler(per_cpu(xen_cmcp_irq, cpu),
					       NULL);
			per_cpu(xen_cmcp_irq, cpu) = -1;
		}
		if (per_cpu(xen_cmc_irq, cpu) >= 0) {
			unbind_from_irqhandler(per_cpu(xen_cmc_irq, cpu), NULL);
			per_cpu(xen_cmc_irq, cpu) = -1;
		}
		if (per_cpu(xen_ipi_irq, cpu) >= 0) {
			unbind_from_irqhandler(per_cpu(xen_ipi_irq, cpu), NULL);
			per_cpu(xen_ipi_irq, cpu) = -1;
		}
		if (per_cpu(xen_resched_irq, cpu) >= 0) {
			unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu),
					       NULL);
			per_cpu(xen_resched_irq, cpu) = -1;
		}
		if (per_cpu(xen_timer_irq, cpu) >= 0) {
			unbind_from_irqhandler(per_cpu(xen_timer_irq, cpu),
					       NULL);
			per_cpu(xen_timer_irq, cpu) = -1;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block unbind_evtchn_notifier = {
	.notifier_call = unbind_evtchn_callback,
	.priority = 0
};
#endif

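/*
 * Replay the percpu irqs cached on the BSP for a secondary cpu that is
 * being brought up.
 */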
void xen_smp_intr_init_early(unsigned int cpu)
{
#ifdef CONFIG_SMP
	unsigned int i;

	for (i = 0; i < saved_irq_cnt; i++)
		__xen_register_percpu_irq(cpu, saved_percpu_irqs[i].irq,
					  saved_percpu_irqs[i].action, 0);
#endif
}

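/*
 * Per-cpu interrupt setup: the boot cpu only installs the hotplug notifier;
 * secondary cpus register the event callback with the hypervisor.
 */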
void xen_smp_intr_init(void)
{
#ifdef CONFIG_SMP
	unsigned int cpu = smp_processor_id();
	struct callback_register event = {
		.type = CALLBACKTYPE_event,
		.address = { .ip = (unsigned long)&xen_event_callback },
	};

	if (cpu == 0) {
		/* Initialization was already done for the boot cpu. */
#ifdef CONFIG_HOTPLUG_CPU
		/* Register the notifier only once. */
		register_cpu_notifier(&unbind_evtchn_notifier);
#endif
		return;
	}

	/* This should be piggybacked onto setting up the vcpu guest context. */
	BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event));
#endif /* CONFIG_SMP */
}

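/*
 * Boot-time irq setup: initialize the event-channel layer, register the
 * event callback and defer percpu irq binding until late_time_init.
 */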
void __init
xen_irq_init(void)
{
	struct callback_register event = {
		.type = CALLBACKTYPE_event,
		.address = { .ip = (unsigned long)&xen_event_callback },
	};

	xen_init_IRQ();
	BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event));
	late_time_init = xen_bind_early_percpu_irq;
}

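/*
 * Translate an ia64 IPI vector into its xen equivalent and deliver it via
 * an event channel.  ap_wakeup_vector is intercepted so that AP bring-up
 * can be hooked here.
 */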
void
xen_platform_send_ipi(int cpu, int vector, int delivery_mode, int redirect)
{
#ifdef CONFIG_SMP
	/* TODO: we need to call vcpu_up here */
	if (unlikely(vector == ap_wakeup_vector)) {
		/* XXX
		 * This should be in __cpu_up(cpu) in ia64 smpboot.c,
		 * like on x86, but we don't want to modify that file,
		 * so keep it untouched.
		 */
		xen_smp_intr_init_early(cpu);

		xen_send_ipi(cpu, vector);
		/* vcpu_prepare_and_up(cpu); */
		return;
	}
#endif

	switch (vector) {
	case IA64_IPI_VECTOR:
		xen_send_IPI_one(cpu, XEN_IPI_VECTOR);
		break;
	case IA64_IPI_RESCHEDULE:
		xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
		break;
	case IA64_CMCP_VECTOR:
		xen_send_IPI_one(cpu, XEN_CMCP_VECTOR);
		break;
	case IA64_CPEP_VECTOR:
		xen_send_IPI_one(cpu, XEN_CPEP_VECTOR);
		break;
	case IA64_TIMER_VECTOR: {
		/* This is used only once, by check_sal_cache_flush()
		   at boot time. */
		static int used = 0;
		if (!used) {
			xen_send_ipi(cpu, IA64_TIMER_VECTOR);
			used = 1;
			break;
		}
		/* fallthrough */
	}
	default:
		printk(KERN_WARNING "Unsupported IPI type 0x%x\n",
		       vector);
		notify_remote_via_irq(0); /* defaults to irq 0 */
		break;
	}
}

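/* pv_irq_ops hook: register the SMP IPI vectors with the xen irqactions. */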
static void __init
xen_register_ipi(void)
{
#ifdef CONFIG_SMP
	register_percpu_irq(IA64_IPI_VECTOR, &xen_ipi_irqaction);
	register_percpu_irq(IA64_IPI_RESCHEDULE, &xen_resched_irqaction);
	register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &xen_tlb_irqaction);
#endif
}

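/* pv_irq_ops hook: re-trigger an irq by notifying its event channel. */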
static void
xen_resend_irq(unsigned int vector)
{
	(void)resend_irq_on_evtchn(vector);
}

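/* pv_irq_ops instance used when running paravirtualized on xen. */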
const struct pv_irq_ops xen_irq_ops __initdata = {
	.register_ipi = xen_register_ipi,

	.assign_irq_vector = xen_assign_irq_vector,
	.free_irq_vector = xen_free_irq_vector,
	.register_percpu_irq = xen_register_percpu_irq,

	.resend_irq = xen_resend_irq,
};