/*
 * linux/arch/sh/kernel/irq.c
 *
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 *
 * SuperH version:  Copyright (C) 1999  Niibe Yutaka
 */
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/ftrace.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <asm/processor.h>
#include <asm/machvec.h>
#include <asm/uaccess.h>
#include <asm/thread_info.h>
#include <cpu/mmu_context.h>

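/* Bumped by ack_bad_irq(); reported as "ERR" by arch_show_interrupts(). */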
atomic_t irq_err_count;

/*
 * 'What should we do if we get a hw irq event on an illegal vector?'
 * Each architecture has to answer this itself; it doesn't deserve
 * a generic callback, I think.
 */
void ack_bad_irq(unsigned int irq)
{
	atomic_inc(&irq_err_count);
	printk(KERN_ERR "unexpected IRQ trap at vector %02x\n", irq);
}

#if defined(CONFIG_PROC_FS)
/*
 * /proc/interrupts printing for arch specific interrupts
 */
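/*
 * Illustrative excerpt of the resulting /proc/interrupts lines
 * (the counts below are made up for illustration):
 *
 *	NMI:          3          0   Non-maskable interrupts
 *	ERR:          0
 */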
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

	seq_printf(p, "%*s: ", prec, "NMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stat[j].__nmi_count);
	seq_printf(p, "  Non-maskable interrupts\n");

	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));

	return 0;
}
#endif

#ifdef CONFIG_IRQSTACKS
/*
 * per-CPU IRQ handling contexts (thread information and stack)
 */
union irq_ctx {
	struct thread_info	tinfo;
	u32			stack[THREAD_SIZE/sizeof(u32)];
};
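/*
 * Layout note: the thread_info sits at the lowest addresses of the
 * THREAD_SIZE area and the stack grows down from the top, exactly as
 * with an ordinary task stack.  On configurations where
 * current_thread_info() is derived by masking the stack pointer, this
 * is what keeps it working while running on an IRQ stack.
 */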
63
64 static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
65 static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
66
67 static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
68 static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
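/*
 * irq_ctx_init() carves one THREAD_SIZE slice per CPU out of these
 * arrays; nothing here is allocated dynamically.
 */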

static inline void handle_one_irq(unsigned int irq)
{
	union irq_ctx *curctx, *irqctx;

	curctx = (union irq_ctx *)current_thread_info();
	irqctx = hardirq_ctx[smp_processor_id()];

	/*
	 * This is where we switch to the IRQ stack. However, if we are
	 * already using the IRQ stack (because we interrupted a hardirq
	 * handler) we can't do that and just have to keep using the
	 * current stack (which is the IRQ stack already after all).
	 */
	if (curctx != irqctx) {
		u32 *isp;

		isp = (u32 *)((char *)irqctx + sizeof(*irqctx));
		irqctx->tinfo.task = curctx->tinfo.task;
		irqctx->tinfo.previous_sp = current_stack_pointer;

		/*
		 * Copy the softirq bits in preempt_count so that the
		 * softirq checks work in the hardirq context.
		 */
		irqctx->tinfo.preempt_count =
			(irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
			(curctx->tinfo.preempt_count & SOFTIRQ_MASK);
		__asm__ __volatile__ (
			"mov	%0, r4		\n"
			"mov	r15, r8		\n"
			"jsr	@%1		\n"
			/* switch to the irq stack */
			" mov	%2, r15		\n"
			/* restore the stack (ring zero) */
			"mov	r8, r15		\n"
			: /* no outputs */
			: "r" (irq), "r" (generic_handle_irq), "r" (isp)
			: "memory", "r0", "r1", "r2", "r3", "r4",
			  "r5", "r6", "r7", "r8", "t", "pr"
		);
	} else
		generic_handle_irq(irq);
}

/*
 * allocate per-cpu stacks for hardirq and for softirq processing
 */
void irq_ctx_init(int cpu)
{
	union irq_ctx *irqctx;

	if (hardirq_ctx[cpu])
		return;

	irqctx = (union irq_ctx *)&hardirq_stack[cpu * THREAD_SIZE];
	irqctx->tinfo.task		= NULL;
	irqctx->tinfo.exec_domain	= NULL;
	irqctx->tinfo.cpu		= cpu;
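	/*
	 * Preset HARDIRQ_OFFSET: once r15 points into this stack,
	 * current_thread_info() resolves to this thread_info (in the
	 * SP-masking configuration), and the preset keeps in_irq() and
	 * friends true while the IRQ stack is in use.
	 */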
	irqctx->tinfo.preempt_count	= HARDIRQ_OFFSET;
	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);

	hardirq_ctx[cpu] = irqctx;

	irqctx = (union irq_ctx *)&softirq_stack[cpu * THREAD_SIZE];
	irqctx->tinfo.task		= NULL;
	irqctx->tinfo.exec_domain	= NULL;
	irqctx->tinfo.cpu		= cpu;
	irqctx->tinfo.preempt_count	= 0;
	irqctx->tinfo.addr_limit	= MAKE_MM_SEG(0);

	softirq_ctx[cpu] = irqctx;

	printk(KERN_INFO "CPU %u irqstacks, hard=%p soft=%p\n",
	       cpu, hardirq_ctx[cpu], softirq_ctx[cpu]);
}

void irq_ctx_exit(int cpu)
{
	hardirq_ctx[cpu] = NULL;
}

void do_softirq_own_stack(void)
{
	struct thread_info *curctx;
	union irq_ctx *irqctx;
	u32 *isp;

	curctx = current_thread_info();
	irqctx = softirq_ctx[smp_processor_id()];
	irqctx->tinfo.task = curctx->task;
	irqctx->tinfo.previous_sp = current_stack_pointer;

	/* build the stack frame on the softirq stack */
	isp = (u32 *)((char *)irqctx + sizeof(*irqctx));

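	/*
	 * Same delay-slot trick as in handle_one_irq(): "mov %1, r15"
	 * executes in the jsr delay slot, so __do_softirq() starts on
	 * the softirq stack; callee-saved r9 holds the old stack
	 * pointer for the switch back.
	 */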
	__asm__ __volatile__ (
		"mov	r15, r9		\n"
		"jsr	@%0		\n"
		/* switch to the softirq stack */
		" mov	%1, r15		\n"
		/* restore the thread stack */
		"mov	r9, r15		\n"
		: /* no outputs */
		: "r" (__do_softirq), "r" (isp)
		: "memory", "r0", "r1", "r2", "r3", "r4",
		  "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
	);
}
#else
static inline void handle_one_irq(unsigned int irq)
{
	generic_handle_irq(irq);
}
#endif

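/*
 * Common IRQ entry point, called from the low-level entry code with the
 * hardware vector.  irq_lookup()/irq_demux() translate that into a Linux
 * IRQ number; NO_IRQ_IGNORE marks vectors that should be silently
 * dropped.  irq_finish() is a machine-specific post-handling hook.
 */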
asmlinkage __irq_entry int do_IRQ(unsigned int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();

	irq = irq_demux(irq_lookup(irq));

	if (irq != NO_IRQ_IGNORE) {
		handle_one_irq(irq);
		irq_finish(irq);
	}

	irq_exit();

	set_irq_regs(old_regs);

	return IRQ_HANDLED;
}

void __init init_IRQ(void)
{
	plat_irq_setup();

	/* Perform the machine specific initialisation */
	if (sh_mv.mv_init_irq)
		sh_mv.mv_init_irq();

	intc_finalize();

	irq_ctx_init(smp_processor_id());
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * The CPU has been marked offline.  Migrate IRQs off this CPU.  If
 * the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 */
void migrate_irqs(void)
{
	unsigned int irq, cpu = smp_processor_id();

	for_each_active_irq(irq) {
		struct irq_data *data = irq_get_irq_data(irq);

		if (data->node == cpu) {
			unsigned int newcpu = cpumask_any_and(data->affinity,
							      cpu_online_mask);
			if (newcpu >= nr_cpu_ids) {
				pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n",
						    irq, cpu);

				cpumask_setall(data->affinity);
			}
			irq_set_affinity(irq, data->affinity);
		}
	}
}
#endif