/*
 * linux/arch/ia64/kernel/irq.c
 *
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * Copyright (C) Ashok Raj <ashok.raj@intel.com>, Intel Corporation 2004
 *
 * 4/14/2004: Added code to handle CPU migration and do safe IRQ
 *            migration without losing interrupts for the iosapic
 *            architecture.
 */

#include <asm/delay.h>
#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

/*
 * What should we do if we get a hardware IRQ event on an illegal vector?
 * Each architecture has to answer this itself.
 */
void ack_bad_irq(unsigned int irq)
{
	printk(KERN_ERR "Unexpected irq vector 0x%x on CPU %u!\n", irq, smp_processor_id());
}
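
/*
 * Illustrative sketch, not part of this file: a low-level dispatcher
 * that sees a vector no handler has claimed would typically account
 * the event in irq_err_count (reported as the "ERR:" line by
 * show_interrupts() below) and then call ack_bad_irq().  The function
 * name here is hypothetical.
 */
#if 0
static void example_spurious_vector(unsigned int irq)
{
	atomic_inc(&irq_err_count);	/* shows up under "ERR:" in /proc/interrupts */
	ack_bad_irq(irq);
}
#endif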

#ifdef CONFIG_IA64_GENERIC
ia64_vector __ia64_irq_to_vector(int irq)
{
	return irq_cfg[irq].vector;
}

unsigned int __ia64_local_vector_to_irq (ia64_vector vec)
{
	return __get_cpu_var(vector_irq)[vec];
}
#endif
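
/*
 * Illustrative sketch, not part of this file: these helpers back the
 * irq <-> vector translation on generic builds.  A delivery path can
 * map the hardware vector back to a Linux irq number before handing
 * it to the generic IRQ layer, roughly like this (the function name
 * is hypothetical):
 */
#if 0
static void example_deliver(ia64_vector vec)
{
	unsigned int irq = __ia64_local_vector_to_irq(vec);

	generic_handle_irq(irq);
}
#endif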

/*
 * Interrupt statistics:
 */

atomic_t irq_err_count;

/*
 * /proc/interrupts printing:
 */

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction *action;
	unsigned long flags;

	if (i == 0) {
		char cpuname[16];
		seq_printf(p, " ");
		for_each_online_cpu(j) {
			snprintf(cpuname, sizeof(cpuname), "CPU%d", j);
			seq_printf(p, "%10s ", cpuname);
		}
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		spin_lock_irqsave(&irq_desc[i].lock, flags);
		action = irq_desc[i].action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j) {
			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
		}
#endif
		seq_printf(p, " %14s", irq_desc[i].chip->name);
		seq_printf(p, " %s", action->name);

		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
skip:
		spin_unlock_irqrestore(&irq_desc[i].lock, flags);
	} else if (i == NR_IRQS)
		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
	return 0;
}
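
/*
 * Illustrative sketch, not part of this file: show_interrupts() is
 * written to be driven by a seq_file iterator that walks positions
 * 0..NR_IRQS, where position NR_IRQS emits the final "ERR:" line.
 * The actual proc wiring lives elsewhere in the tree; the names below
 * are illustrative only.
 */
#if 0
static void *int_seq_start(struct seq_file *f, loff_t *pos)
{
	return (*pos <= NR_IRQS) ? pos : NULL;
}

static void *int_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	return (*pos <= NR_IRQS) ? pos : NULL;
}

static void int_seq_stop(struct seq_file *f, void *v)
{
	/* nothing to release */
}

static const struct seq_operations int_seq_ops = {
	.start = int_seq_start,
	.next  = int_seq_next,
	.stop  = int_seq_stop,
	.show  = show_interrupts,
};
#endif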

#ifdef CONFIG_SMP
static char irq_redir[NR_IRQS]; /* = { [0 ... NR_IRQS-1] = 1 }; */

void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
{
	if (irq < NR_IRQS) {
		cpumask_copy(irq_desc[irq].affinity,
			     cpumask_of(cpu_logical_id(hwid)));
		irq_redir[irq] = (char) (redir & 0xff);
	}
}

bool is_affinity_mask_valid(const struct cpumask *cpumask)
{
	if (ia64_platform_is("sn2")) {
		/* Only allow one CPU to be specified in the smp_affinity mask */
		if (cpumask_weight(cpumask) != 1)
			return false;
	}
	return true;
}
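
/*
 * Illustrative sketch, not part of this file: the generic
 * /proc/irq/N/smp_affinity write path is expected to parse the
 * user-supplied mask and reject it via is_affinity_mask_valid()
 * before applying it, roughly along these lines (the function name
 * is hypothetical; irq_set_affinity() is the generic-layer setter):
 */
#if 0
static int example_set_affinity(unsigned int irq, const struct cpumask *new_mask)
{
	if (!is_affinity_mask_valid(new_mask))
		return -EINVAL;	/* e.g. >1 CPU requested on sn2 */
	return irq_set_affinity(irq, new_mask);
}
#endif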

#endif /* CONFIG_SMP */

#ifdef CONFIG_HOTPLUG_CPU
unsigned int vectors_in_migration[NR_IRQS];

/*
 * Since cpu_online_mask is already updated, we just need to check for
 * affinity masks that no longer contain any online CPU.
 */
static void migrate_irqs(void)
{
	struct irq_desc *desc;
	int irq, new_cpu;

	for (irq = 0; irq < NR_IRQS; irq++) {
		desc = irq_desc + irq;

		if (desc->status & IRQ_DISABLED)
			continue;

		/*
		 * No handling for now.
		 * TBD: Implement a disable function so we can tell the CPU
		 * not to respond to these local interrupt sources, such as
		 * ITV, CPEI and MCA.
		 */
		if (desc->status & IRQ_PER_CPU)
			continue;

		if (cpumask_any_and(irq_desc[irq].affinity, cpu_online_mask)
		    >= nr_cpu_ids) {
			/*
			 * Save it for phase 2 processing
			 */
			vectors_in_migration[irq] = irq;

			new_cpu = cpumask_any(cpu_online_mask);

			/*
			 * All three are essential; currently WARN_ON.. maybe panic?
			 */
			if (desc->chip && desc->chip->disable &&
			    desc->chip->enable && desc->chip->set_affinity) {
				desc->chip->disable(irq);
				desc->chip->set_affinity(irq,
							 cpumask_of(new_cpu));
				desc->chip->enable(irq);
			} else {
				WARN_ON((!(desc->chip) || !(desc->chip->disable) ||
					 !(desc->chip->enable) ||
					 !(desc->chip->set_affinity)));
			}
		}
	}
}
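
/*
 * Illustrative sketch, not part of this file: the test used above,
 * cpumask_any_and(mask, cpu_online_mask) >= nr_cpu_ids, is the cpumask
 * idiom for "the intersection is empty", i.e. the IRQ's affinity mask
 * no longer contains any online CPU.  An equivalent, more explicit
 * form (helper name hypothetical):
 */
#if 0
static bool example_affinity_all_offline(const struct cpumask *affinity)
{
	return !cpumask_intersects(affinity, cpu_online_mask);
}
#endif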

void fixup_irqs(void)
{
	unsigned int irq;
	extern void ia64_process_pending_intr(void);
	extern volatile int time_keeper_id;

	/* Mask ITV to disable the timer */
	ia64_set_itv(1 << 16);

	/*
	 * Find a new timesync master
	 */
	if (smp_processor_id() == time_keeper_id) {
		time_keeper_id = cpumask_first(cpu_online_mask);
		printk("CPU %d is now promoted to time-keeper master\n", time_keeper_id);
	}

	/*
	 * Phase 1: Locate IRQs bound to this cpu and
	 * relocate them for cpu removal.
	 */
	migrate_irqs();

	/*
	 * Phase 2: Perform interrupt processing for all entries reported in
	 * the local APIC.
	 */
	ia64_process_pending_intr();

	/*
	 * Phase 3: Now handle any interrupts not captured in the local APIC.
	 * This accounts for cases where a device interrupted while its RTE
	 * was being disabled and reprogrammed.
	 */
	for (irq = 0; irq < NR_IRQS; irq++) {
		if (vectors_in_migration[irq]) {
			struct pt_regs *old_regs = set_irq_regs(NULL);

			vectors_in_migration[irq] = 0;
			generic_handle_irq(irq);
			set_irq_regs(old_regs);
		}
	}

	/*
	 * Now let the processor die. We disable interrupts and call
	 * max_xtp() to ensure no more interrupts are routed to this
	 * processor. The local timer interrupt may still have one
	 * instance pending, which timer_interrupt() takes care of.
	 */
	max_xtp();
	local_irq_disable();
}
#endif
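
/*
 * Illustrative sketch, not part of this file: fixup_irqs() is intended
 * to run on the CPU being offlined, after that CPU has been cleared
 * from the online map (matching the assumption in migrate_irqs() that
 * cpu_online_mask is already updated).  The arch hotplug path would
 * call it roughly like this (function body simplified, name kept only
 * for flavor):
 */
#if 0
int example_cpu_disable(void)
{
	int cpu = smp_processor_id();

	set_cpu_online(cpu, false);	/* drop ourselves from the online map */
	fixup_irqs();			/* migrate IRQs away and drain pending ones */
	local_flush_tlb_all();
	return 0;
}
#endif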