/*
 * Common interrupt code for 32 and 64 bit
 */
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/of.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/ftrace.h>
#include <linux/delay.h>
#include <linux/export.h>

#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/mce.h>
#include <asm/hw_irq.h>

atomic_t irq_err_count;

/* Function pointer for generic interrupt vector handling */
void (*x86_platform_ipi_callback)(void) = NULL;

/*
 * 'What should we do if we get a hw irq event on an illegal vector?'
 * Each architecture has to answer this itself.
 */
void ack_bad_irq(unsigned int irq)
{
	if (printk_ratelimit())
		pr_err("unexpected IRQ trap at vector %02x\n", irq);

	/*
	 * Currently unexpected vectors happen only on SMP and APIC.
	 * We _must_ ack these because every local APIC has only N
	 * irq slots per priority level, and a 'hanging, unacked' IRQ
	 * holds up an irq slot - in excessive cases (when multiple
	 * unexpected vectors occur) that might lock up the APIC
	 * completely.
	 * But only ack when the APIC is enabled -AK
	 */
	ack_APIC_irq();
}

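/* Shorthand for the given CPU's per-cpu irq_stat structure. */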
#define irq_stats(x)		(&per_cpu(irq_stat, x))
/*
 * /proc/interrupts printing for arch specific interrupts
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

	seq_printf(p, "%*s: ", prec, "NMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
	seq_printf(p, "  Non-maskable interrupts\n");
#ifdef CONFIG_X86_LOCAL_APIC
	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
	seq_printf(p, "  Local timer interrupts\n");

	seq_printf(p, "%*s: ", prec, "SPU");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
	seq_printf(p, "  Spurious interrupts\n");
	seq_printf(p, "%*s: ", prec, "PMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs);
	seq_printf(p, "  Performance monitoring interrupts\n");
	seq_printf(p, "%*s: ", prec, "IWI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_irq_work_irqs);
	seq_printf(p, "  IRQ work interrupts\n");
	seq_printf(p, "%*s: ", prec, "RTR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->icr_read_retry_count);
	seq_printf(p, "  APIC ICR read retries\n");
#endif
	if (x86_platform_ipi_callback) {
		seq_printf(p, "%*s: ", prec, "PLT");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", irq_stats(j)->x86_platform_ipis);
		seq_printf(p, "  Platform interrupts\n");
	}
#ifdef CONFIG_SMP
	seq_printf(p, "%*s: ", prec, "RES");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
	seq_printf(p, "  Rescheduling interrupts\n");
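	/*
	 * TLB shootdowns are sent via the function-call IPI and counted
	 * in irq_call_count, so subtract them here and report them on
	 * their own TLB line below.
	 */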
	seq_printf(p, "%*s: ", prec, "CAL");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_call_count -
					irq_stats(j)->irq_tlb_count);
	seq_printf(p, "  Function call interrupts\n");
	seq_printf(p, "%*s: ", prec, "TLB");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
	seq_printf(p, "  TLB shootdowns\n");
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
	seq_printf(p, "%*s: ", prec, "TRM");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
	seq_printf(p, "  Thermal event interrupts\n");
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	seq_printf(p, "%*s: ", prec, "THR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
	seq_printf(p, "  Threshold APIC interrupts\n");
#endif
#ifdef CONFIG_X86_MCE
	seq_printf(p, "%*s: ", prec, "MCE");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_exception_count, j));
	seq_printf(p, "  Machine check exceptions\n");
	seq_printf(p, "%*s: ", prec, "MCP");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
	seq_printf(p, "  Machine check polls\n");
#endif
	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC)
	seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
#endif
	return 0;
}

/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = irq_stats(cpu)->__nmi_count;

#ifdef CONFIG_X86_LOCAL_APIC
	sum += irq_stats(cpu)->apic_timer_irqs;
	sum += irq_stats(cpu)->irq_spurious_count;
	sum += irq_stats(cpu)->apic_perf_irqs;
	sum += irq_stats(cpu)->apic_irq_work_irqs;
	sum += irq_stats(cpu)->icr_read_retry_count;
#endif
	if (x86_platform_ipi_callback)
		sum += irq_stats(cpu)->x86_platform_ipis;
#ifdef CONFIG_SMP
	sum += irq_stats(cpu)->irq_resched_count;
	sum += irq_stats(cpu)->irq_call_count;
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
	sum += irq_stats(cpu)->irq_thermal_count;
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	sum += irq_stats(cpu)->irq_threshold_count;
#endif
#ifdef CONFIG_X86_MCE
	sum += per_cpu(mce_exception_count, cpu);
	sum += per_cpu(mce_poll_count, cpu);
#endif
	return sum;
}

u64 arch_irq_stat(void)
{
	u64 sum = atomic_read(&irq_err_count);
	return sum;
}

/*
 * do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	/*
	 * The entry code stores the negated vector number in orig_ax,
	 * so the high bit tells the ret_from_ code that this is an
	 * interrupt frame rather than a syscall.
	 */
	unsigned int vector = ~regs->orig_ax;
	unsigned int irq;

	irq_enter();
	exit_idle();

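	/*
	 * Translate the hardware vector into a Linux irq number via the
	 * per-cpu vector_irq[] table.
	 */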
	irq = __this_cpu_read(vector_irq[vector]);

	if (!handle_irq(irq, regs)) {
		ack_APIC_irq();

		if (printk_ratelimit())
			pr_emerg("%s: %d.%d No irq handler for vector (irq %d)\n",
				__func__, smp_processor_id(), vector, irq);
	}

	irq_exit();

	set_irq_regs(old_regs);
	return 1;
}

/*
 * Handler for X86_PLATFORM_IPI_VECTOR.
 */
void smp_x86_platform_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

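	/*
	 * Ack the vector right away; the platform callback below runs
	 * with the interrupt already acknowledged at the APIC.
	 */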
	ack_APIC_irq();

	irq_enter();

	exit_idle();

	inc_irq_stat(x86_platform_ipis);

	if (x86_platform_ipi_callback)
		x86_platform_ipi_callback();

	irq_exit();

	set_irq_regs(old_regs);
}

EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);

#ifdef CONFIG_HOTPLUG_CPU
/* A cpu has been removed from cpu_online_mask.  Reset irq affinities. */
void fixup_irqs(void)
{
	unsigned int irq, vector;
	static int warned;
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;

	for_each_irq_desc(irq, desc) {
		int break_affinity = 0;
		int set_affinity = 1;
		const struct cpumask *affinity;

		if (!desc)
			continue;
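		/* irq 2 is the 8259 cascade and must not be touched. */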
		if (irq == 2)
			continue;

		/* Interrupts are disabled at this point. */
		raw_spin_lock(&desc->lock);

		data = irq_desc_get_irq_data(desc);
		affinity = data->affinity;
		if (!irq_has_action(irq) || irqd_is_per_cpu(data) ||
		    cpumask_subset(affinity, cpu_online_mask)) {
			raw_spin_unlock(&desc->lock);
			continue;
		}

		/*
		 * Complete any pending irq move. This cpu is going down,
		 * and in the non-interrupt-remapping case we cannot wait
		 * until the interrupt arrives at this cpu before
		 * completing the move.
		 */
		irq_force_complete_move(irq);

		if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
			break_affinity = 1;
			affinity = cpu_online_mask;
		}

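		/*
		 * If the chip cannot move the irq from process context,
		 * mask it so it cannot fire while the affinity changes.
		 */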
		chip = irq_data_get_irq_chip(data);
		if (!irqd_can_move_in_process_context(data) && chip->irq_mask)
			chip->irq_mask(data);

		if (chip->irq_set_affinity)
			chip->irq_set_affinity(data, affinity, true);
		else if (!(warned++))
			set_affinity = 0;

		/*
		 * We unmask if the irq was not marked masked by the
		 * core code. That respects the lazy irq disable
		 * behaviour.
		 */
		if (!irqd_can_move_in_process_context(data) &&
		    !irqd_irq_masked(data) && chip->irq_unmask)
			chip->irq_unmask(data);

		raw_spin_unlock(&desc->lock);

		if (break_affinity && set_affinity)
			pr_notice("Broke affinity for irq %i\n", irq);
		else if (!set_affinity)
			pr_notice("Cannot set affinity for irq %i\n", irq);
	}

	/*
	 * We could drop the mdelay() and simply send spurious interrupts
	 * to the new cpu targets for all the irqs that were previously
	 * handled by this cpu. While that works, it produces spurious
	 * interrupt messages (nothing wrong, but still...).
	 *
	 * So for now, retain the mdelay(1), check the IRR, and then send
	 * those interrupts to their new targets, as this cpu is already
	 * offlined...
	 */
	mdelay(1);

	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		unsigned int irr;

		if (__this_cpu_read(vector_irq[vector]) < 0)
			continue;

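		/*
		 * The IRR covers the 256 vectors as eight 32-bit APIC
		 * registers spaced 0x10 apart.
		 */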
		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		if (irr & (1 << (vector % 32))) {
			irq = __this_cpu_read(vector_irq[vector]);

			desc = irq_to_desc(irq);
			data = irq_desc_get_irq_data(desc);
			chip = irq_data_get_irq_chip(data);
			raw_spin_lock(&desc->lock);
			if (chip->irq_retrigger)
				chip->irq_retrigger(data);
			raw_spin_unlock(&desc->lock);
		}
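		/* Nothing should arrive on this vector any more; drop the mapping. */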
		__this_cpu_write(vector_irq[vector], -1);
	}
}
#endif