/*
 *  linux/arch/arm/kernel/traps.c
 *
 *  Copyright (C) 1995-2009 Russell King
 *  Fragments that appear the same as linux/arch/i386/kernel/traps.c (C) Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  'traps.c' handles hardware exceptions after we have saved some state in
 *  'linux/arch/arm/lib/traps.S'.  Mostly a debugging aid, but will probably
 *  kill the offending process.
 */
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/sched.h>

#include <linux/atomic.h>
#include <asm/cacheflush.h>
#include <asm/exception.h>
#include <asm/unistd.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/tls.h>
#include <asm/system_misc.h>
#include <asm/opcodes.h>

static const char *handler[] = { "prefetch abort", "data abort", "address exception", "interrupt" };

void *vectors_page;

#ifdef CONFIG_DEBUG_USER
unsigned int user_debug;

static int __init user_debug_setup(char *str)
{
        get_option(&str, &user_debug);
        return 1;
}
__setup("user_debug=", user_debug_setup);
#endif
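
/*
 * user_debug is a bitmask taken from the "user_debug=" boot parameter.
 * The UDBG_UNDEFINED, UDBG_SYSCALL and UDBG_BADABORT tests later in this
 * file use it to decide which user-space faults get reported to the log.
 */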

static void dump_mem(const char *, const char *, unsigned long, unsigned long);

void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
{
#ifdef CONFIG_KALLSYMS
        printk("[<%08lx>] (%pS) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
#else
        printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
#endif

        if (in_exception_text(where))
                dump_mem("", "Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs));
}

#ifndef CONFIG_ARM_UNWIND
/*
 * Stack pointers should always be within the kernel's view of
 * physical memory.  If a stack pointer is not there, then we can't
 * dump out any information relating to the stack.
 */
static int verify_stack(unsigned long sp)
{
        if (sp < PAGE_OFFSET ||
            (sp > (unsigned long)high_memory && high_memory != NULL))
                return -EFAULT;

        return 0;
}
#endif

/*
 * Dump out the contents of some memory nicely...
 */
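/*
 * Each output line covers one 32-byte block: the low 16 bits of the address,
 * then up to eight words read with __get_user(), with "????????" shown for
 * words that cannot be read, e.g. (values are illustrative only):
 *
 *        bf80: c069a000 00000000 00000002 c05f3a8c ???????? 00000000 ...
 */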
static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
                     unsigned long top)
{
        unsigned long first;
        mm_segment_t fs;
        int i;

        /*
         * We need to switch to kernel mode so that we can use __get_user
         * to safely read from kernel space.  Note that we now dump the
         * code first, just in case the backtrace kills us.
         */
        fs = get_fs();
        set_fs(KERNEL_DS);

        printk("%s%s(0x%08lx to 0x%08lx)\n", lvl, str, bottom, top);

        for (first = bottom & ~31; first < top; first += 32) {
                unsigned long p;
                char str[sizeof(" 12345678") * 8 + 1];

                memset(str, ' ', sizeof(str));
                str[sizeof(str) - 1] = '\0';

                for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
                        if (p >= bottom && p < top) {
                                unsigned long val;
                                if (__get_user(val, (unsigned long *)p) == 0)
                                        sprintf(str + i * 9, " %08lx", val);
                                else
                                        sprintf(str + i * 9, " ????????");
                        }
                }
                printk("%s%04lx:%s\n", lvl, first & 0xffff, str);
        }

        set_fs(fs);
}

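/*
 * Print the instructions around the faulting PC as a "Code:" line: the
 * four instructions (or Thumb halfwords) before the PC, the one at the
 * PC in parentheses, and for Thumb one extra halfword after it.
 */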
static void dump_instr(const char *lvl, struct pt_regs *regs)
{
        unsigned long addr = instruction_pointer(regs);
        const int thumb = thumb_mode(regs);
        const int width = thumb ? 4 : 8;
        mm_segment_t fs;
        char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
        int i;

        /*
         * We need to switch to kernel mode so that we can use __get_user
         * to safely read from kernel space.  Note that we now dump the
         * code first, just in case the backtrace kills us.
         */
        fs = get_fs();
        set_fs(KERNEL_DS);

        for (i = -4; i < 1 + !!thumb; i++) {
                unsigned int val, bad;

                if (thumb)
                        bad = __get_user(val, &((u16 *)addr)[i]);
                else
                        bad = __get_user(val, &((u32 *)addr)[i]);

                if (!bad)
                        p += sprintf(p, i == 0 ? "(%0*x) " : "%0*x ",
                                     width, val);
                else {
                        p += sprintf(p, "bad PC value");
                        break;
                }
        }
        printk("%sCode: %s\n", lvl, str);

        set_fs(fs);
}

#ifdef CONFIG_ARM_UNWIND
static inline void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
        unwind_backtrace(regs, tsk);
}
#else
static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
        unsigned int fp, mode;
        int ok = 1;

        printk("Backtrace: ");

        if (!tsk)
                tsk = current;

        if (regs) {
                fp = regs->ARM_fp;
                mode = processor_mode(regs);
        } else if (tsk != current) {
                fp = thread_saved_fp(tsk);
                mode = 0x10;
        } else {
                asm("mov %0, fp" : "=r" (fp) : : "cc");
                mode = 0x10;
        }

        if (!fp) {
                printk("no frame pointer");
                ok = 0;
        } else if (verify_stack(fp)) {
                printk("invalid frame pointer 0x%08x", fp);
                ok = 0;
        } else if (fp < (unsigned long)end_of_stack(tsk))
                printk("frame pointer underflow");
        printk("\n");

        if (ok)
                c_backtrace(fp, mode);
}
#endif

void show_stack(struct task_struct *tsk, unsigned long *sp)
{
        dump_backtrace(NULL, tsk);
        barrier();
}

#ifdef CONFIG_PREEMPT
#define S_PREEMPT " PREEMPT"
#else
#define S_PREEMPT ""
#endif
#ifdef CONFIG_SMP
#define S_SMP " SMP"
#else
#define S_SMP ""
#endif
#ifdef CONFIG_THUMB2_KERNEL
#define S_ISA " THUMB2"
#else
#define S_ISA " ARM"
#endif
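
/*
 * These strings only feed the "Internal error:" banner printed by __die()
 * below, e.g. "Internal error: Oops: 17 [#1] PREEMPT SMP ARM".
 */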

static int __die(const char *str, int err, struct pt_regs *regs)
{
        struct task_struct *tsk = current;
        static int die_counter;
        int ret;

        printk(KERN_EMERG "Internal error: %s: %x [#%d]" S_PREEMPT S_SMP
               S_ISA "\n", str, err, ++die_counter);

        /* trap and error numbers are mostly meaningless on ARM */
        ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, SIGSEGV);
        if (ret == NOTIFY_STOP)
                return 1;

        print_modules();
        __show_regs(regs);
        printk(KERN_EMERG "Process %.*s (pid: %d, stack limit = 0x%p)\n",
               TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), end_of_stack(tsk));

        if (!user_mode(regs) || in_interrupt()) {
                dump_mem(KERN_EMERG, "Stack: ", regs->ARM_sp,
                         THREAD_SIZE + (unsigned long)task_stack_page(tsk));
                dump_backtrace(regs, tsk);
                dump_instr(KERN_EMERG, regs);
        }

        return 0;
}

static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;

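/*
 * die_lock serialises oops output between CPUs.  oops_begin() lets the CPU
 * that already holds the lock (die_owner) continue on a nested oops rather
 * than deadlock, and die_nest_count ensures oops_end() only releases the
 * lock once the outermost oops has finished.
 */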
static unsigned long oops_begin(void)
{
        int cpu;
        unsigned long flags;

        oops_enter();

        /* racy, but better than risking deadlock. */
        raw_local_irq_save(flags);
        cpu = smp_processor_id();
        if (!arch_spin_trylock(&die_lock)) {
                if (cpu == die_owner)
                        /* nested oops. should stop eventually */;
                else
                        arch_spin_lock(&die_lock);
        }
        die_nest_count++;
        die_owner = cpu;
        console_verbose();
        bust_spinlocks(1);
        return flags;
}

static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
        if (regs && kexec_should_crash(current))
                crash_kexec(regs);

        bust_spinlocks(0);
        die_owner = -1;
        add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
        die_nest_count--;
        if (!die_nest_count)
                /* Nest count reaches zero, release the lock. */
                arch_spin_unlock(&die_lock);
        raw_local_irq_restore(flags);
        oops_exit();

        if (in_interrupt())
                panic("Fatal exception in interrupt");
        if (panic_on_oops)
                panic("Fatal exception");
        if (signr)
                do_exit(signr);
}

/*
 * This function is protected against re-entrancy.
 */
void die(const char *str, struct pt_regs *regs, int err)
{
        enum bug_trap_type bug_type = BUG_TRAP_TYPE_NONE;
        unsigned long flags = oops_begin();
        int sig = SIGSEGV;

        if (!user_mode(regs))
                bug_type = report_bug(regs->ARM_pc, regs);
        if (bug_type != BUG_TRAP_TYPE_NONE)
                str = "Oops - BUG";

        if (__die(str, err, regs))
                sig = 0;

        oops_end(flags, regs, sig);
}

void arm_notify_die(const char *str, struct pt_regs *regs,
                struct siginfo *info, unsigned long err, unsigned long trap)
{
        if (user_mode(regs)) {
                current->thread.error_code = err;
                current->thread.trap_no = trap;

                force_sig_info(info->si_signo, info, current);
        } else {
                die(str, regs, err);
        }
}

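/*
 * On ARM, BUG() traps on an undefined/breakpoint instruction encoded by
 * BUG_INSTR_VALUE.  When die() calls report_bug() above, the generic BUG
 * code uses is_valid_bugaddr() to confirm that the trapping PC really
 * does hold that instruction.
 */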
#ifdef CONFIG_GENERIC_BUG

int is_valid_bugaddr(unsigned long pc)
{
#ifdef CONFIG_THUMB2_KERNEL
        u16 bkpt;
        u16 insn = __opcode_to_mem_thumb16(BUG_INSTR_VALUE);
#else
        u32 bkpt;
        u32 insn = __opcode_to_mem_arm(BUG_INSTR_VALUE);
#endif

        if (probe_kernel_address((unsigned *)pc, bkpt))
                return 0;

        return bkpt == insn;
}

#endif

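/*
 * Undefined-instruction hooks: users such as kprobes, SWP emulation or the
 * TLS trap below register an undef_hook with an instruction value/mask and
 * a CPSR value/mask; do_undefinstr() offers each faulting instruction to
 * the matching hooks before falling back to SIGILL.  See arm_mrc_hook
 * further down for a complete example.
 */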
static LIST_HEAD(undef_hook);
static DEFINE_RAW_SPINLOCK(undef_lock);

void register_undef_hook(struct undef_hook *hook)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&undef_lock, flags);
        list_add(&hook->node, &undef_hook);
        raw_spin_unlock_irqrestore(&undef_lock, flags);
}

void unregister_undef_hook(struct undef_hook *hook)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&undef_lock, flags);
        list_del(&hook->node);
        raw_spin_unlock_irqrestore(&undef_lock, flags);
}

static int call_undef_hook(struct pt_regs *regs, unsigned int instr)
{
        struct undef_hook *hook;
        unsigned long flags;
        int (*fn)(struct pt_regs *regs, unsigned int instr) = NULL;

        raw_spin_lock_irqsave(&undef_lock, flags);
        list_for_each_entry(hook, &undef_hook, node)
                if ((instr & hook->instr_mask) == hook->instr_val &&
                    (regs->ARM_cpsr & hook->cpsr_mask) == hook->cpsr_val)
                        fn = hook->fn;
        raw_spin_unlock_irqrestore(&undef_lock, flags);

        return fn ? fn(regs, instr) : 1;
}

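/*
 * Called from the undefined-instruction vector (see entry-armv.S): fetch
 * the faulting instruction (including 32-bit Thumb-2 encodings), let any
 * registered undef hooks emulate or fix it up, and otherwise report it:
 * SIGILL for user mode, die() via arm_notify_die() for the kernel.
 */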
asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
{
        unsigned int instr;
        siginfo_t info;
        void __user *pc;

        pc = (void __user *)instruction_pointer(regs);

        if (processor_mode(regs) == SVC_MODE) {
#ifdef CONFIG_THUMB2_KERNEL
                if (thumb_mode(regs)) {
                        instr = __mem_to_opcode_thumb16(((u16 *)pc)[0]);
                        if (is_wide_instruction(instr)) {
                                u16 inst2;
                                inst2 = __mem_to_opcode_thumb16(((u16 *)pc)[1]);
                                instr = __opcode_thumb32_compose(instr, inst2);
                        }
                } else
#endif
                        instr = __mem_to_opcode_arm(*(u32 *) pc);
        } else if (thumb_mode(regs)) {
                if (get_user(instr, (u16 __user *)pc))
                        goto die_sig;
                instr = __mem_to_opcode_thumb16(instr);
                if (is_wide_instruction(instr)) {
                        unsigned int instr2;
                        if (get_user(instr2, (u16 __user *)pc+1))
                                goto die_sig;
                        instr2 = __mem_to_opcode_thumb16(instr2);
                        instr = __opcode_thumb32_compose(instr, instr2);
                }
        } else {
                if (get_user(instr, (u32 __user *)pc))
                        goto die_sig;
                instr = __mem_to_opcode_arm(instr);
        }

        if (call_undef_hook(regs, instr) == 0)
                return;

die_sig:
#ifdef CONFIG_DEBUG_USER
        if (user_debug & UDBG_UNDEFINED) {
                printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n",
                       current->comm, task_pid_nr(current), pc);
                dump_instr(KERN_INFO, regs);
        }
#endif

        info.si_signo = SIGILL;
        info.si_errno = 0;
        info.si_code = ILL_ILLOPC;
        info.si_addr = pc;

        arm_notify_die("Oops - undefined instruction", regs, &info, 0, 6);
}

asmlinkage void do_unexp_fiq(struct pt_regs *regs)
{
        printk("Hmm. Unexpected FIQ received, but trying to continue\n");
        printk("You may have a hardware problem...\n");
}

/*
 * bad_mode handles the impossible case in the vectors.  If you see one of
 * these, then it's extremely serious, and could mean you have buggy hardware.
 * It never returns, and never tries to sync.  We hope that we can at least
 * dump out some state information...
 */
asmlinkage void bad_mode(struct pt_regs *regs, int reason)
{
        console_verbose();

        printk(KERN_CRIT "Bad mode in %s handler detected\n", handler[reason]);

        die("Oops - bad mode", regs, 0);
        local_irq_disable();
        panic("bad mode");
}

static int bad_syscall(int n, struct pt_regs *regs)
{
        struct thread_info *thread = current_thread_info();
        siginfo_t info;

        if ((current->personality & PER_MASK) != PER_LINUX &&
            thread->exec_domain->handler) {
                thread->exec_domain->handler(n, regs);
                return regs->ARM_r0;
        }

#ifdef CONFIG_DEBUG_USER
        if (user_debug & UDBG_SYSCALL) {
                printk(KERN_ERR "[%d] %s: obsolete system call %08x.\n",
                       task_pid_nr(current), current->comm, n);
                dump_instr(KERN_ERR, regs);
        }
#endif

        info.si_signo = SIGILL;
        info.si_errno = 0;
        info.si_code = ILL_ILLTRP;
        info.si_addr = (void __user *)instruction_pointer(regs) -
                        (thumb_mode(regs) ? 2 : 4);

        arm_notify_die("Oops - bad syscall", regs, &info, n, 0);

        return regs->ARM_r0;
}

static long do_cache_op_restart(struct restart_block *);

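/*
 * Flush the requested range in chunks of at most PAGE_SIZE so a large
 * cacheflush request cannot hog the CPU: we may reschedule between chunks,
 * and if a signal is pending the remaining range is saved in
 * arm_restart_block so the syscall is restarted via do_cache_op_restart().
 */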
static inline int
__do_cache_op(unsigned long start, unsigned long end)
{
        int ret;

        do {
                unsigned long chunk = min(PAGE_SIZE, end - start);

                if (signal_pending(current)) {
                        struct thread_info *ti = current_thread_info();

                        ti->restart_block = (struct restart_block) {
                                .fn = do_cache_op_restart,
                        };

                        ti->arm_restart_block = (struct arm_restart_block) {
                                {
                                        .cache = {
                                                .start = start,
                                                .end = end,
                                        },
                                },
                        };

                        return -ERESTART_RESTARTBLOCK;
                }

                ret = flush_cache_user_range(start, start + chunk);
                if (ret)
                        return ret;

                cond_resched();
                start += chunk;
        } while (start < end);

        return 0;
}

static long do_cache_op_restart(struct restart_block *unused)
{
        struct arm_restart_block *restart_block;

        restart_block = &current_thread_info()->arm_restart_block;
        return __do_cache_op(restart_block->cache.start,
                             restart_block->cache.end);
}

static inline int
do_cache_op(unsigned long start, unsigned long end, int flags)
{
        if (end < start || flags)
                return -EINVAL;

        if (!access_ok(VERIFY_READ, start, end - start))
                return -EFAULT;

        return __do_cache_op(start, end);
}

/*
 * Handle all unrecognised system calls.
 *  0x9f0000 - 0x9fffff are some more esoteric system calls
 */
#define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE)
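/*
 * NR(x) turns an __ARM_NR_* constant from <asm/unistd.h> into its offset
 * from __ARM_NR_BASE (0x0f0000 above the normal syscall base), which is
 * what the switch statement below matches on.
 */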
asmlinkage int arm_syscall(int no, struct pt_regs *regs)
{
        struct thread_info *thread = current_thread_info();
        siginfo_t info;

        if ((no >> 16) != (__ARM_NR_BASE >> 16))
                return bad_syscall(no, regs);

        switch (no & 0xffff) {
        case 0: /* branch through 0 */
                info.si_signo = SIGSEGV;
                info.si_errno = 0;
                info.si_code = SEGV_MAPERR;
                info.si_addr = NULL;

                arm_notify_die("branch through zero", regs, &info, 0, 0);
                return 0;

        case NR(breakpoint): /* SWI BREAK_POINT */
                regs->ARM_pc -= thumb_mode(regs) ? 2 : 4;
                ptrace_break(current, regs);
                return regs->ARM_r0;

        /*
         * Flush a region from virtual address 'r0' to virtual address 'r1'
         * _exclusive_.  There is no alignment requirement on either address;
         * user space does not need to know the hardware cache layout.
         *
         * r2 contains flags.  It should ALWAYS be passed as ZERO until it
         * is defined to be something else.  For now we ignore it, but may
         * the fires of hell burn in your belly if you break this rule. ;)
         *
         * (at a later date, we may want to allow this call to not flush
         * various aspects of the cache.  Passing '0' will guarantee that
         * everything necessary gets flushed to maintain consistency in
         * the specified region).
         */
        case NR(cacheflush):
                return do_cache_op(regs->ARM_r0, regs->ARM_r1, regs->ARM_r2);

        case NR(usr26):
                if (!(elf_hwcap & HWCAP_26BIT))
                        break;
                regs->ARM_cpsr &= ~MODE32_BIT;
                return regs->ARM_r0;

        case NR(usr32):
                if (!(elf_hwcap & HWCAP_26BIT))
                        break;
                regs->ARM_cpsr |= MODE32_BIT;
                return regs->ARM_r0;

        case NR(set_tls):
                thread->tp_value[0] = regs->ARM_r0;
                if (tls_emu)
                        return 0;
                if (has_tls_reg) {
                        asm ("mcr p15, 0, %0, c13, c0, 3"
                                : : "r" (regs->ARM_r0));
                } else {
                        /*
                         * User space must never try to access this directly.
                         * Expect your app to break eventually if you do so.
                         * The user helper at 0xffff0fe0 must be used instead.
                         * (see entry-armv.S for details)
                         */
                        *((unsigned int *)0xffff0ff0) = regs->ARM_r0;
                }
                return 0;

#ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG
        /*
         * Atomically store r1 in *r2 if *r2 is equal to r0 for user space.
         * Return zero in r0 if *MEM was changed or non-zero if no exchange
         * happened.  Also set the user C flag accordingly.
         * If access permissions have to be fixed up then non-zero is
         * returned and the operation has to be re-attempted.
         *
         * *NOTE*: This is a ghost syscall private to the kernel.  Only the
         * __kuser_cmpxchg code in entry-armv.S should be aware of its
         * existence.  Don't ever use this from user code.
         */
        case NR(cmpxchg):
        for (;;) {
                extern void do_DataAbort(unsigned long addr, unsigned int fsr,
                                         struct pt_regs *regs);
                unsigned long val;
                unsigned long addr = regs->ARM_r2;
                struct mm_struct *mm = current->mm;
                pgd_t *pgd; pmd_t *pmd; pte_t *pte;
                spinlock_t *ptl;

                regs->ARM_cpsr &= ~PSR_C_BIT;
                down_read(&mm->mmap_sem);
                pgd = pgd_offset(mm, addr);
                if (!pgd_present(*pgd))
                        goto bad_access;
                pmd = pmd_offset(pgd, addr);
                if (!pmd_present(*pmd))
                        goto bad_access;
                pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
                if (!pte_present(*pte) || !pte_write(*pte) || !pte_dirty(*pte)) {
                        pte_unmap_unlock(pte, ptl);
                        goto bad_access;
                }
                val = *(unsigned long *)addr;
                val -= regs->ARM_r0;
                if (val == 0) {
                        *(unsigned long *)addr = regs->ARM_r1;
                        regs->ARM_cpsr |= PSR_C_BIT;
                }
                pte_unmap_unlock(pte, ptl);
                up_read(&mm->mmap_sem);
                return val;

        bad_access:
                up_read(&mm->mmap_sem);
                /* simulate a write access fault */
                do_DataAbort(addr, 15 + (1 << 11), regs);
        }
#endif

        default:
                /* Calls 9f00xx..9f07ff are defined to return -ENOSYS
                   if not implemented, rather than raising SIGILL.  This
                   way the calling program can gracefully determine whether
                   a feature is supported.  */
                if ((no & 0xffff) <= 0x7ff)
                        return -ENOSYS;
                break;
        }
#ifdef CONFIG_DEBUG_USER
        /*
         * experience shows that these seem to indicate that
         * something catastrophic has happened
         */
        if (user_debug & UDBG_SYSCALL) {
                printk("[%d] %s: arm syscall %d\n",
                       task_pid_nr(current), current->comm, no);
                dump_instr("", regs);
                if (user_mode(regs)) {
                        __show_regs(regs);
                        c_backtrace(regs->ARM_fp, processor_mode(regs));
                }
        }
#endif
        info.si_signo = SIGILL;
        info.si_errno = 0;
        info.si_code = ILL_ILLTRP;
        info.si_addr = (void __user *)instruction_pointer(regs) -
                        (thumb_mode(regs) ? 2 : 4);

        arm_notify_die("Oops - bad syscall(2)", regs, &info, no, 0);
        return 0;
}

#ifdef CONFIG_TLS_REG_EMUL

/*
 * We might be running on an ARMv6+ processor which should have the TLS
 * register but for some reason we can't use it, or maybe an SMP system
 * using a pre-ARMv6 processor (there are apparently a few prototypes like
 * that in existence) and therefore access to that register must be
 * emulated.
 */

static int get_tp_trap(struct pt_regs *regs, unsigned int instr)
{
        int reg = (instr >> 12) & 15;
        if (reg == 15)
                return 1;
        regs->uregs[reg] = current_thread_info()->tp_value[0];
        regs->ARM_pc += 4;
        return 0;
}

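/*
 * instr_val/instr_mask match "mrc p15, 0, rN, c13, c0, 3" in ARM state,
 * the CP15 read of the user thread ID register that user space uses to
 * fetch its TLS pointer; get_tp_trap() emulates it when the hardware
 * register cannot be used.
 */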
static struct undef_hook arm_mrc_hook = {
        .instr_mask = 0x0fff0fff,
        .instr_val = 0x0e1d0f70,
        .cpsr_mask = PSR_T_BIT,
        .cpsr_val = 0,
        .fn = get_tp_trap,
};

static int __init arm_mrc_hook_init(void)
{
        register_undef_hook(&arm_mrc_hook);
        return 0;
}

late_initcall(arm_mrc_hook_init);

#endif

void __bad_xchg(volatile void *ptr, int size)
{
        printk("xchg: bad data size: pc 0x%p, ptr 0x%p, size %d\n",
               __builtin_return_address(0), ptr, size);
        BUG();
}
EXPORT_SYMBOL(__bad_xchg);

/*
 * A data abort trap was taken, but we did not handle the instruction.
 * Try to abort the user program, or panic if it was the kernel.
 */
asmlinkage void
baddataabort(int code, unsigned long instr, struct pt_regs *regs)
{
        unsigned long addr = instruction_pointer(regs);
        siginfo_t info;

#ifdef CONFIG_DEBUG_USER
        if (user_debug & UDBG_BADABORT) {
                printk(KERN_ERR "[%d] %s: bad data abort: code %d instr 0x%08lx\n",
                       task_pid_nr(current), current->comm, code, instr);
                dump_instr(KERN_ERR, regs);
                show_pte(current->mm, addr);
        }
#endif

        info.si_signo = SIGILL;
        info.si_errno = 0;
        info.si_code = ILL_ILLOPC;
        info.si_addr = (void __user *)addr;

        arm_notify_die("unknown data abort code", regs, &info, instr, 0);
}

void __readwrite_bug(const char *fn)
{
        printk("%s called, but not implemented\n", fn);
        BUG();
}
EXPORT_SYMBOL(__readwrite_bug);

void __pte_error(const char *file, int line, pte_t pte)
{
        printk("%s:%d: bad pte %08llx.\n", file, line, (long long)pte_val(pte));
}

void __pmd_error(const char *file, int line, pmd_t pmd)
{
        printk("%s:%d: bad pmd %08llx.\n", file, line, (long long)pmd_val(pmd));
}

void __pgd_error(const char *file, int line, pgd_t pgd)
{
        printk("%s:%d: bad pgd %08llx.\n", file, line, (long long)pgd_val(pgd));
}

asmlinkage void __div0(void)
{
        printk("Division by zero in kernel.\n");
        dump_stack();
}
EXPORT_SYMBOL(__div0);

void abort(void)
{
        BUG();

        /* if that doesn't kill us, halt */
        panic("Oops failed to kill thread");
}
EXPORT_SYMBOL(abort);

void __init trap_init(void)
{
        return;
}

#ifdef CONFIG_KUSER_HELPERS
static void __init kuser_init(void *vectors)
{
        extern char __kuser_helper_start[], __kuser_helper_end[];
        int kuser_sz = __kuser_helper_end - __kuser_helper_start;

        memcpy(vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);

        /*
         * vectors + 0xfe0 = __kuser_get_tls
         * vectors + 0xfe8 = hardware TLS instruction at 0xffff0fe8
         */
        if (tls_emu || has_tls_reg)
                memcpy(vectors + 0xfe0, vectors + 0xfe8, 4);
}
#else
static inline void __init kuser_init(void *vectors)
{
}
#endif

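/*
 * Lay out the exception vector area: the first page (mapped at 0xffff0000)
 * holds the vectors with the kuser helpers (if enabled) packed at the top,
 * and the page after it holds the exception stubs; both are copied from
 * entry-armv.S.
 */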
void __init early_trap_init(void *vectors_base)
{
#ifndef CONFIG_CPU_V7M
        unsigned long vectors = (unsigned long)vectors_base;
        extern char __stubs_start[], __stubs_end[];
        extern char __vectors_start[], __vectors_end[];
        unsigned i;

        vectors_page = vectors_base;

        /*
         * Poison the vectors page with an undefined instruction.  This
         * instruction is chosen to be undefined for both ARM and Thumb
         * ISAs.  The Thumb version is an undefined instruction with a
         * branch back to the undefined instruction.
         */
        for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
                ((u32 *)vectors_base)[i] = 0xe7fddef1;

        /*
         * Copy the vectors, stubs and kuser helpers (in entry-armv.S)
         * into the vector page, mapped at 0xffff0000, and ensure these
         * are visible to the instruction stream.
         */
        memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
        memcpy((void *)vectors + 0x1000, __stubs_start, __stubs_end - __stubs_start);

        kuser_init(vectors_base);

        flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
        modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
#else /* ifndef CONFIG_CPU_V7M */
        /*
         * On V7-M there is no need to copy the vector table to a dedicated
         * memory area.  The address is configurable and so a table in the
         * kernel image can be used.
         */
#endif
}