sh: Remove signal translation and exec_domain
arch/sh/kernel/irq.c
/*
 * linux/arch/sh/kernel/irq.c
 *
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * SuperH version: Copyright (C) 1999 Niibe Yutaka
 */
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/ftrace.h>
#include <linux/delay.h>
#include <linux/ratelimit.h>
#include <asm/processor.h>
#include <asm/machvec.h>
#include <asm/uaccess.h>
#include <asm/thread_info.h>
#include <cpu/mmu_context.h>

atomic_t irq_err_count;

/*
 * 'What should we do if we get a hw irq event on an illegal vector?'
 * Each architecture has to answer this itself; it doesn't deserve
 * a generic callback.
 */
void ack_bad_irq(unsigned int irq)
{
        atomic_inc(&irq_err_count);
        printk(KERN_ERR "unexpected IRQ trap at vector %02x\n", irq);
}

#if defined(CONFIG_PROC_FS)
/*
 * /proc/interrupts printing for arch specific interrupts
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
        int j;

        seq_printf(p, "%*s: ", prec, "NMI");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stat[j].__nmi_count);
        seq_printf(p, "  Non-maskable interrupts\n");

        seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));

        return 0;
}
#endif

#ifdef CONFIG_IRQSTACKS
/*
 * per-CPU IRQ handling contexts (thread information and stack)
 */
union irq_ctx {
        struct thread_info      tinfo;
        u32                     stack[THREAD_SIZE/sizeof(u32)];
};
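/*
 * Note: as with a task's normal stack, the thread_info sits at the
 * base of the THREAD_SIZE stack area.  On parts where
 * current_thread_info() is derived from the stack pointer, it
 * therefore resolves to this tinfo while we run on the IRQ stack.
 */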

static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;

static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
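/* One THREAD_SIZE slice per CPU; irq_ctx_init() carves these up. */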

static inline void handle_one_irq(unsigned int irq)
{
        union irq_ctx *curctx, *irqctx;

        curctx = (union irq_ctx *)current_thread_info();
        irqctx = hardirq_ctx[smp_processor_id()];

        /*
         * This is where we switch to the IRQ stack.  However, if we are
         * already using the IRQ stack (because we interrupted a hardirq
         * handler), we can't do that and just have to keep using the
         * current stack (which is the IRQ stack already after all).
         */
        if (curctx != irqctx) {
                u32 *isp;

                isp = (u32 *)((char *)irqctx + sizeof(*irqctx));
                irqctx->tinfo.task = curctx->tinfo.task;
                irqctx->tinfo.previous_sp = current_stack_pointer;

                /*
                 * Copy the softirq bits in preempt_count so that the
                 * softirq checks work in the hardirq context.
                 */
                irqctx->tinfo.preempt_count =
                        (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
                        (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
                __asm__ __volatile__ (
                        "mov    %0, r4          \n"
                        "mov    r15, r8         \n"
                        "jsr    @%1             \n"
                        /* switch to the irq stack */
                        " mov   %2, r15         \n"
                        /* restore the stack (ring zero) */
                        "mov    r8, r15         \n"
                        : /* no outputs */
                        : "r" (irq), "r" (generic_handle_irq), "r" (isp)
                        : "memory", "r0", "r1", "r2", "r3", "r4",
                          "r5", "r6", "r7", "r8", "t", "pr"
                );
        } else
                generic_handle_irq(irq);
}

/*
 * allocate per-cpu stacks for hardirq and for softirq processing
 */
void irq_ctx_init(int cpu)
{
        union irq_ctx *irqctx;

        if (hardirq_ctx[cpu])
                return;

        irqctx = (union irq_ctx *)&hardirq_stack[cpu * THREAD_SIZE];
        irqctx->tinfo.task              = NULL;
        irqctx->tinfo.cpu               = cpu;
        irqctx->tinfo.preempt_count     = HARDIRQ_OFFSET;
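        /* preset hardirq context: in_irq() reads as true on this stack */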
        irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);

        hardirq_ctx[cpu] = irqctx;

        irqctx = (union irq_ctx *)&softirq_stack[cpu * THREAD_SIZE];
        irqctx->tinfo.task              = NULL;
        irqctx->tinfo.cpu               = cpu;
        irqctx->tinfo.preempt_count     = 0;
        irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);

        softirq_ctx[cpu] = irqctx;

        printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
               cpu, hardirq_ctx[cpu], softirq_ctx[cpu]);
}

void irq_ctx_exit(int cpu)
{
        hardirq_ctx[cpu] = NULL;
}

void do_softirq_own_stack(void)
{
        struct thread_info *curctx;
        union irq_ctx *irqctx;
        u32 *isp;

        curctx = current_thread_info();
        irqctx = softirq_ctx[smp_processor_id()];
        irqctx->tinfo.task = curctx->task;
        irqctx->tinfo.previous_sp = current_stack_pointer;

        /* build the stack frame on the softirq stack */
        isp = (u32 *)((char *)irqctx + sizeof(*irqctx));

        __asm__ __volatile__ (
                "mov    r15, r9         \n"
                "jsr    @%0             \n"
                /* switch to the softirq stack */
                " mov   %1, r15         \n"
                /* restore the thread stack */
                "mov    r9, r15         \n"
                : /* no outputs */
                : "r" (__do_softirq), "r" (isp)
                : "memory", "r0", "r1", "r2", "r3", "r4",
                  "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
        );
}
#else
static inline void handle_one_irq(unsigned int irq)
{
        generic_handle_irq(irq);
}
#endif

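/*
 * Main interrupt entry point, reached from the low-level entry code;
 * irq_lookup()/irq_demux() translate the incoming vector into a
 * Linux IRQ number before it is handled.
 */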
asmlinkage __irq_entry int do_IRQ(unsigned int irq, struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        irq_enter();

        irq = irq_demux(irq_lookup(irq));

        if (irq != NO_IRQ_IGNORE) {
                handle_one_irq(irq);
                irq_finish(irq);
        }

        irq_exit();

        set_irq_regs(old_regs);

        return IRQ_HANDLED;
}

void __init init_IRQ(void)
{
        plat_irq_setup();

        /* Perform the machine-specific initialisation */
        if (sh_mv.mv_init_irq)
                sh_mv.mv_init_irq();

        intc_finalize();

        irq_ctx_init(smp_processor_id());
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * The CPU has been marked offline.  Migrate IRQs off this CPU.  If
 * the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 */
void migrate_irqs(void)
{
        unsigned int irq, cpu = smp_processor_id();

        for_each_active_irq(irq) {
                struct irq_data *data = irq_get_irq_data(irq);

                if (data->node == cpu) {
                        unsigned int newcpu = cpumask_any_and(data->affinity,
                                                              cpu_online_mask);
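                        /*
                         * cpumask_any_and() returns >= nr_cpu_ids when
                         * the masks don't intersect, i.e. no online CPU
                         * is left in the affinity mask.
                         */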
                        if (newcpu >= nr_cpu_ids) {
                                pr_info_ratelimited("IRQ%u no longer affine to CPU%u\n",
                                                    irq, cpu);

                                cpumask_setall(data->affinity);
                        }
                        irq_set_affinity(irq, data->affinity);
                }
        }
}
#endif