sh: Move in the SH-5 traps.c impl.
author Paul Mundt <lethal@linux-sh.org>
Sat, 10 Nov 2007 11:14:15 +0000 (20:14 +0900)
committer Paul Mundt <lethal@linux-sh.org>
Mon, 28 Jan 2008 04:18:43 +0000 (13:18 +0900)
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
arch/sh/kernel/Makefile_32
arch/sh/kernel/Makefile_64
arch/sh/kernel/traps.c [deleted file]
arch/sh/kernel/traps_32.c [new file with mode: 0644]
arch/sh/kernel/traps_64.c [new file with mode: 0644]
arch/sh64/kernel/traps.c [deleted file]

diff --git a/arch/sh/kernel/Makefile_32 b/arch/sh/kernel/Makefile_32
index bad6bf5ac47823f411e85a0d68f85239cd59b6b8..29b44eb3b93444c37f36f5182ffe941986bde7e7 100644
--- a/arch/sh/kernel/Makefile_32
+++ b/arch/sh/kernel/Makefile_32
@@ -6,7 +6,7 @@ extra-y := head.o init_task.o vmlinux.lds
 
 obj-y  := debugtraps.o io.o io_generic.o irq.o machvec.o process_32.o \
           ptrace_32.o semaphore.o setup.o signal_32.o sys_sh.o syscalls.o \
-          time.o topology.o traps.o
+          time.o topology.o traps_32.o
 
 obj-y                          += cpu/ timers/
 obj-$(CONFIG_VSYSCALL)         += vsyscall/
diff --git a/arch/sh/kernel/Makefile_64 b/arch/sh/kernel/Makefile_64
index 1f27d5fd99a83281b028e2b7e6a50cfb0fd1a061..fb87d642d1c1594d14577c7e511731016b45c38f 100644
--- a/arch/sh/kernel/Makefile_64
+++ b/arch/sh/kernel/Makefile_64
@@ -2,7 +2,7 @@ extra-y := head.o init_task.o vmlinux.lds
 
 obj-y  := debugtraps.o io.o io_generic.o irq.o machvec.o process_64.o \
           ptrace_64.o semaphore.o setup.o signal_64.o sys_sh64.o syscalls.o \
-          time.o topology.o traps.o
+          time.o topology.o traps_64.o
 
 obj-y                          += cpu/ timers/
 obj-$(CONFIG_VSYSCALL)         += vsyscall/
diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
deleted file mode 100644
index cf99111..0000000
--- a/arch/sh/kernel/traps.c
+++ /dev/null
@@ -1,947 +0,0 @@
-/*
- * 'traps.c' handles hardware traps and faults after we have saved some
- * state in 'entry.S'.
- *
- *  SuperH version: Copyright (C) 1999 Niibe Yutaka
- *                  Copyright (C) 2000 Philipp Rumpf
- *                  Copyright (C) 2000 David Howells
- *                  Copyright (C) 2002 - 2007 Paul Mundt
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/kernel.h>
-#include <linux/ptrace.h>
-#include <linux/init.h>
-#include <linux/spinlock.h>
-#include <linux/module.h>
-#include <linux/kallsyms.h>
-#include <linux/io.h>
-#include <linux/bug.h>
-#include <linux/debug_locks.h>
-#include <linux/kdebug.h>
-#include <linux/kexec.h>
-#include <linux/limits.h>
-#include <asm/system.h>
-#include <asm/uaccess.h>
-
-#ifdef CONFIG_SH_KGDB
-#include <asm/kgdb.h>
-#define CHK_REMOTE_DEBUG(regs)                 \
-{                                              \
-       if (kgdb_debug_hook && !user_mode(regs))\
-               (*kgdb_debug_hook)(regs);       \
-}
-#else
-#define CHK_REMOTE_DEBUG(regs)
-#endif
-
-#ifdef CONFIG_CPU_SH2
-# define TRAP_RESERVED_INST    4
-# define TRAP_ILLEGAL_SLOT_INST        6
-# define TRAP_ADDRESS_ERROR    9
-# ifdef CONFIG_CPU_SH2A
-#  define TRAP_DIVZERO_ERROR   17
-#  define TRAP_DIVOVF_ERROR    18
-# endif
-#else
-#define TRAP_RESERVED_INST     12
-#define TRAP_ILLEGAL_SLOT_INST 13
-#endif
-
-static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
-{
-       unsigned long p;
-       int i;
-
-       printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top);
-
-       for (p = bottom & ~31; p < top; ) {
-               printk("%04lx: ", p & 0xffff);
-
-               for (i = 0; i < 8; i++, p += 4) {
-                       unsigned int val;
-
-                       if (p < bottom || p >= top)
-                               printk("         ");
-                       else {
-                               if (__get_user(val, (unsigned int __user *)p)) {
-                                       printk("\n");
-                                       return;
-                               }
-                               printk("%08x ", val);
-                       }
-               }
-               printk("\n");
-       }
-}
-
-static DEFINE_SPINLOCK(die_lock);
-
-void die(const char * str, struct pt_regs * regs, long err)
-{
-       static int die_counter;
-
-       oops_enter();
-
-       console_verbose();
-       spin_lock_irq(&die_lock);
-       bust_spinlocks(1);
-
-       printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
-
-       CHK_REMOTE_DEBUG(regs);
-       print_modules();
-       show_regs(regs);
-
-       printk("Process: %s (pid: %d, stack limit = %p)\n", current->comm,
-                       task_pid_nr(current), task_stack_page(current) + 1);
-
-       if (!user_mode(regs) || in_interrupt())
-               dump_mem("Stack: ", regs->regs[15], THREAD_SIZE +
-                        (unsigned long)task_stack_page(current));
-
-       bust_spinlocks(0);
-       add_taint(TAINT_DIE);
-       spin_unlock_irq(&die_lock);
-
-       if (kexec_should_crash(current))
-               crash_kexec(regs);
-
-       if (in_interrupt())
-               panic("Fatal exception in interrupt");
-
-       if (panic_on_oops)
-               panic("Fatal exception");
-
-       oops_exit();
-       do_exit(SIGSEGV);
-}
-
-static inline void die_if_kernel(const char *str, struct pt_regs *regs,
-                                long err)
-{
-       if (!user_mode(regs))
-               die(str, regs, err);
-}
-
-/*
- * try and fix up kernelspace address errors
- * - userspace errors just cause EFAULT to be returned, resulting in SEGV
- * - kernel/userspace interfaces cause a jump to an appropriate handler
- * - other kernel errors are bad
- * - return 0 if fixed-up, -EFAULT if non-fatal (to the kernel) fault
- */
-static int die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
-{
-       if (!user_mode(regs)) {
-               const struct exception_table_entry *fixup;
-               fixup = search_exception_tables(regs->pc);
-               if (fixup) {
-                       regs->pc = fixup->fixup;
-                       return 0;
-               }
-               die(str, regs, err);
-       }
-       return -EFAULT;
-}
-
-/*
- * handle an instruction that does an unaligned memory access by emulating the
- * desired behaviour
- * - note that PC _may not_ point to the faulting instruction
- *   (if that instruction is in a branch delay slot)
- * - return 0 if emulation okay, -EFAULT on existential error
- */
-static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
-{
-       int ret, index, count;
-       unsigned long *rm, *rn;
-       unsigned char *src, *dst;
-
-       index = (instruction>>8)&15;    /* 0x0F00 */
-       rn = &regs->regs[index];
-
-       index = (instruction>>4)&15;    /* 0x00F0 */
-       rm = &regs->regs[index];
-
-       count = 1<<(instruction&3);
-
-       ret = -EFAULT;
-       switch (instruction>>12) {
-       case 0: /* mov.[bwl] to/from memory via r0+rn */
-               if (instruction & 8) {
-                       /* from memory */
-                       src = (unsigned char*) *rm;
-                       src += regs->regs[0];
-                       dst = (unsigned char*) rn;
-                       *(unsigned long*)dst = 0;
-
-#ifdef __LITTLE_ENDIAN__
-                       if (copy_from_user(dst, src, count))
-                               goto fetch_fault;
-
-                       if ((count == 2) && dst[1] & 0x80) {
-                               dst[2] = 0xff;
-                               dst[3] = 0xff;
-                       }
-#else
-                       dst += 4-count;
-
-                       if (__copy_user(dst, src, count))
-                               goto fetch_fault;
-
-                       if ((count == 2) && dst[2] & 0x80) {
-                               dst[0] = 0xff;
-                               dst[1] = 0xff;
-                       }
-#endif
-               } else {
-                       /* to memory */
-                       src = (unsigned char*) rm;
-#if !defined(__LITTLE_ENDIAN__)
-                       src += 4-count;
-#endif
-                       dst = (unsigned char*) *rn;
-                       dst += regs->regs[0];
-
-                       if (copy_to_user(dst, src, count))
-                               goto fetch_fault;
-               }
-               ret = 0;
-               break;
-
-       case 1: /* mov.l Rm,@(disp,Rn) */
-               src = (unsigned char*) rm;
-               dst = (unsigned char*) *rn;
-               dst += (instruction&0x000F)<<2;
-
-               if (copy_to_user(dst,src,4))
-                       goto fetch_fault;
-               ret = 0;
-               break;
-
-       case 2: /* mov.[bwl] to memory, possibly with pre-decrement */
-               if (instruction & 4)
-                       *rn -= count;
-               src = (unsigned char*) rm;
-               dst = (unsigned char*) *rn;
-#if !defined(__LITTLE_ENDIAN__)
-               src += 4-count;
-#endif
-               if (copy_to_user(dst, src, count))
-                       goto fetch_fault;
-               ret = 0;
-               break;
-
-       case 5: /* mov.l @(disp,Rm),Rn */
-               src = (unsigned char*) *rm;
-               src += (instruction&0x000F)<<2;
-               dst = (unsigned char*) rn;
-               *(unsigned long*)dst = 0;
-
-               if (copy_from_user(dst,src,4))
-                       goto fetch_fault;
-               ret = 0;
-               break;
-
-       case 6: /* mov.[bwl] from memory, possibly with post-increment */
-               src = (unsigned char*) *rm;
-               if (instruction & 4)
-                       *rm += count;
-               dst = (unsigned char*) rn;
-               *(unsigned long*)dst = 0;
-
-#ifdef __LITTLE_ENDIAN__
-               if (copy_from_user(dst, src, count))
-                       goto fetch_fault;
-
-               if ((count == 2) && dst[1] & 0x80) {
-                       dst[2] = 0xff;
-                       dst[3] = 0xff;
-               }
-#else
-               dst += 4-count;
-
-               if (copy_from_user(dst, src, count))
-                       goto fetch_fault;
-
-               if ((count == 2) && dst[2] & 0x80) {
-                       dst[0] = 0xff;
-                       dst[1] = 0xff;
-               }
-#endif
-               ret = 0;
-               break;
-
-       case 8:
-               switch ((instruction&0xFF00)>>8) {
-               case 0x81: /* mov.w R0,@(disp,Rn) */
-                       src = (unsigned char*) &regs->regs[0];
-#if !defined(__LITTLE_ENDIAN__)
-                       src += 2;
-#endif
-                       dst = (unsigned char*) *rm; /* called Rn in the spec */
-                       dst += (instruction&0x000F)<<1;
-
-                       if (copy_to_user(dst, src, 2))
-                               goto fetch_fault;
-                       ret = 0;
-                       break;
-
-               case 0x85: /* mov.w @(disp,Rm),R0 */
-                       src = (unsigned char*) *rm;
-                       src += (instruction&0x000F)<<1;
-                       dst = (unsigned char*) &regs->regs[0];
-                       *(unsigned long*)dst = 0;
-
-#if !defined(__LITTLE_ENDIAN__)
-                       dst += 2;
-#endif
-
-                       if (copy_from_user(dst, src, 2))
-                               goto fetch_fault;
-
-#ifdef __LITTLE_ENDIAN__
-                       if (dst[1] & 0x80) {
-                               dst[2] = 0xff;
-                               dst[3] = 0xff;
-                       }
-#else
-                       if (dst[2] & 0x80) {
-                               dst[0] = 0xff;
-                               dst[1] = 0xff;
-                       }
-#endif
-                       ret = 0;
-                       break;
-               }
-               break;
-       }
-       return ret;
-
- fetch_fault:
-       /* Argh. Address not only misaligned but also non-existent.
-        * Raise an EFAULT and see if it's trapped
-        */
-       return die_if_no_fixup("Fault in unaligned fixup", regs, 0);
-}
-
-/*
- * emulate the instruction in the delay slot
- * - fetches the instruction from PC+2
- */
-static inline int handle_unaligned_delayslot(struct pt_regs *regs)
-{
-       u16 instruction;
-
-       if (copy_from_user(&instruction, (u16 *)(regs->pc+2), 2)) {
-               /* the instruction-fetch faulted */
-               if (user_mode(regs))
-                       return -EFAULT;
-
-               /* kernel */
-               die("delay-slot-insn faulting in handle_unaligned_delayslot",
-                   regs, 0);
-       }
-
-       return handle_unaligned_ins(instruction,regs);
-}
-
-/*
- * handle an instruction that does an unaligned memory access
- * - have to be careful of branch delay-slot instructions that fault
- *  SH3:
- *   - if the branch would be taken PC points to the branch
- *   - if the branch would not be taken, PC points to delay-slot
- *  SH4:
- *   - PC always points to delayed branch
- * - return 0 if handled, -EFAULT if failed (may not return if in kernel)
- */
-
-/* Macros to determine offset from current PC for branch instructions */
-/* Explicit type coercion is used to force sign extension where needed */
-#define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4)
-#define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4)
-
-/*
- * XXX: SH-2A needs this too, but it needs an overhaul thanks to mixed 32-bit
- * opcodes..
- */
-#ifndef CONFIG_CPU_SH2A
-static int handle_unaligned_notify_count = 10;
-
-static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
-{
-       u_int rm;
-       int ret, index;
-
-       index = (instruction>>8)&15;    /* 0x0F00 */
-       rm = regs->regs[index];
-
-       /* shout about the first ten userspace fixups */
-       if (user_mode(regs) && handle_unaligned_notify_count>0) {
-               handle_unaligned_notify_count--;
-
-               printk(KERN_NOTICE "Fixing up unaligned userspace access "
-                      "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
-                      current->comm, task_pid_nr(current),
-                      (u16 *)regs->pc, instruction);
-       }
-
-       ret = -EFAULT;
-       switch (instruction&0xF000) {
-       case 0x0000:
-               if (instruction==0x000B) {
-                       /* rts */
-                       ret = handle_unaligned_delayslot(regs);
-                       if (ret==0)
-                               regs->pc = regs->pr;
-               }
-               else if ((instruction&0x00FF)==0x0023) {
-                       /* braf @Rm */
-                       ret = handle_unaligned_delayslot(regs);
-                       if (ret==0)
-                               regs->pc += rm + 4;
-               }
-               else if ((instruction&0x00FF)==0x0003) {
-                       /* bsrf @Rm */
-                       ret = handle_unaligned_delayslot(regs);
-                       if (ret==0) {
-                               regs->pr = regs->pc + 4;
-                               regs->pc += rm + 4;
-                       }
-               }
-               else {
-                       /* mov.[bwl] to/from memory via r0+rn */
-                       goto simple;
-               }
-               break;
-
-       case 0x1000: /* mov.l Rm,@(disp,Rn) */
-               goto simple;
-
-       case 0x2000: /* mov.[bwl] to memory, possibly with pre-decrement */
-               goto simple;
-
-       case 0x4000:
-               if ((instruction&0x00FF)==0x002B) {
-                       /* jmp @Rm */
-                       ret = handle_unaligned_delayslot(regs);
-                       if (ret==0)
-                               regs->pc = rm;
-               }
-               else if ((instruction&0x00FF)==0x000B) {
-                       /* jsr @Rm */
-                       ret = handle_unaligned_delayslot(regs);
-                       if (ret==0) {
-                               regs->pr = regs->pc + 4;
-                               regs->pc = rm;
-                       }
-               }
-               else {
-                       /* mov.[bwl] to/from memory via r0+rn */
-                       goto simple;
-               }
-               break;
-
-       case 0x5000: /* mov.l @(disp,Rm),Rn */
-               goto simple;
-
-       case 0x6000: /* mov.[bwl] from memory, possibly with post-increment */
-               goto simple;
-
-       case 0x8000: /* bf lab, bf/s lab, bt lab, bt/s lab */
-               switch (instruction&0x0F00) {
-               case 0x0100: /* mov.w R0,@(disp,Rm) */
-                       goto simple;
-               case 0x0500: /* mov.w @(disp,Rm),R0 */
-                       goto simple;
-               case 0x0B00: /* bf   lab - no delayslot*/
-                       break;
-               case 0x0F00: /* bf/s lab */
-                       ret = handle_unaligned_delayslot(regs);
-                       if (ret==0) {
-#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
-                               if ((regs->sr & 0x00000001) != 0)
-                                       regs->pc += 4; /* next after slot */
-                               else
-#endif
-                                       regs->pc += SH_PC_8BIT_OFFSET(instruction);
-                       }
-                       break;
-               case 0x0900: /* bt   lab - no delayslot */
-                       break;
-               case 0x0D00: /* bt/s lab */
-                       ret = handle_unaligned_delayslot(regs);
-                       if (ret==0) {
-#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
-                               if ((regs->sr & 0x00000001) == 0)
-                                       regs->pc += 4; /* next after slot */
-                               else
-#endif
-                                       regs->pc += SH_PC_8BIT_OFFSET(instruction);
-                       }
-                       break;
-               }
-               break;
-
-       case 0xA000: /* bra label */
-               ret = handle_unaligned_delayslot(regs);
-               if (ret==0)
-                       regs->pc += SH_PC_12BIT_OFFSET(instruction);
-               break;
-
-       case 0xB000: /* bsr label */
-               ret = handle_unaligned_delayslot(regs);
-               if (ret==0) {
-                       regs->pr = regs->pc + 4;
-                       regs->pc += SH_PC_12BIT_OFFSET(instruction);
-               }
-               break;
-       }
-       return ret;
-
-       /* handle non-delay-slot instruction */
- simple:
-       ret = handle_unaligned_ins(instruction,regs);
-       if (ret==0)
-               regs->pc += instruction_size(instruction);
-       return ret;
-}
-#endif /* CONFIG_CPU_SH2A */
-
-#ifdef CONFIG_CPU_HAS_SR_RB
-#define lookup_exception_vector(x)     \
-       __asm__ __volatile__ ("stc r2_bank, %0\n\t" : "=r" ((x)))
-#else
-#define lookup_exception_vector(x)     \
-       __asm__ __volatile__ ("mov r4, %0\n\t" : "=r" ((x)))
-#endif
-
-/*
- * Handle various address error exceptions:
- *  - instruction address error:
- *       misaligned PC
- *       PC >= 0x80000000 in user mode
- *  - data address error (read and write)
- *       misaligned data access
- *       access to >= 0x80000000 in user mode
- * Unfortunately we can't distinguish between instruction address errors
- * and data address errors caused by read accesses.
- */
-asmlinkage void do_address_error(struct pt_regs *regs,
-                                unsigned long writeaccess,
-                                unsigned long address)
-{
-       unsigned long error_code = 0;
-       mm_segment_t oldfs;
-       siginfo_t info;
-#ifndef CONFIG_CPU_SH2A
-       u16 instruction;
-       int tmp;
-#endif
-
-       /* Intentional ifdef */
-#ifdef CONFIG_CPU_HAS_SR_RB
-       lookup_exception_vector(error_code);
-#endif
-
-       oldfs = get_fs();
-
-       if (user_mode(regs)) {
-               int si_code = BUS_ADRERR;
-
-               local_irq_enable();
-
-               /* bad PC is not something we can fix */
-               if (regs->pc & 1) {
-                       si_code = BUS_ADRALN;
-                       goto uspace_segv;
-               }
-
-#ifndef CONFIG_CPU_SH2A
-               set_fs(USER_DS);
-               if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
-                       /* Argh. Fault on the instruction itself.
-                          This should never happen on non-SMP systems.
-                       */
-                       set_fs(oldfs);
-                       goto uspace_segv;
-               }
-
-               tmp = handle_unaligned_access(instruction, regs);
-               set_fs(oldfs);
-
-               if (tmp==0)
-                       return; /* sorted */
-#endif
-
-uspace_segv:
-               printk(KERN_NOTICE "Sending SIGBUS to \"%s\" due to unaligned "
-                      "access (PC %lx PR %lx)\n", current->comm, regs->pc,
-                      regs->pr);
-
-               info.si_signo = SIGBUS;
-               info.si_errno = 0;
-               info.si_code = si_code;
-               info.si_addr = (void __user *)address;
-               force_sig_info(SIGBUS, &info, current);
-       } else {
-               if (regs->pc & 1)
-                       die("unaligned program counter", regs, error_code);
-
-#ifndef CONFIG_CPU_SH2A
-               set_fs(KERNEL_DS);
-               if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
-                       /* Argh. Fault on the instruction itself.
-                          This should never happen on non-SMP systems.
-                       */
-                       set_fs(oldfs);
-                       die("insn faulting in do_address_error", regs, 0);
-               }
-
-               handle_unaligned_access(instruction, regs);
-               set_fs(oldfs);
-#else
-               printk(KERN_NOTICE "Killing process \"%s\" due to unaligned "
-                      "access\n", current->comm);
-
-               force_sig(SIGSEGV, current);
-#endif
-       }
-}
-
-#ifdef CONFIG_SH_DSP
-/*
- *     SH-DSP support by gerg@snapgear.com.
- */
-int is_dsp_inst(struct pt_regs *regs)
-{
-       unsigned short inst = 0;
-
-       /*
-        * Safe guard if DSP mode is already enabled or we're lacking
-        * the DSP altogether.
-        */
-       if (!(current_cpu_data.flags & CPU_HAS_DSP) || (regs->sr & SR_DSP))
-               return 0;
-
-       get_user(inst, ((unsigned short *) regs->pc));
-
-       inst &= 0xf000;
-
-       /* Check for any type of DSP or support instruction */
-       if ((inst == 0xf000) || (inst == 0x4000))
-               return 1;
-
-       return 0;
-}
-#else
-#define is_dsp_inst(regs)      (0)
-#endif /* CONFIG_SH_DSP */
-
-#ifdef CONFIG_CPU_SH2A
-asmlinkage void do_divide_error(unsigned long r4, unsigned long r5,
-                               unsigned long r6, unsigned long r7,
-                               struct pt_regs __regs)
-{
-       siginfo_t info;
-
-       switch (r4) {
-       case TRAP_DIVZERO_ERROR:
-               info.si_code = FPE_INTDIV;
-               break;
-       case TRAP_DIVOVF_ERROR:
-               info.si_code = FPE_INTOVF;
-               break;
-       }
-
-       force_sig_info(SIGFPE, &info, current);
-}
-#endif
-
-/* arch/sh/kernel/cpu/sh4/fpu.c */
-extern int do_fpu_inst(unsigned short, struct pt_regs *);
-extern asmlinkage void do_fpu_state_restore(unsigned long r4, unsigned long r5,
-               unsigned long r6, unsigned long r7, struct pt_regs __regs);
-
-asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5,
-                               unsigned long r6, unsigned long r7,
-                               struct pt_regs __regs)
-{
-       struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
-       unsigned long error_code;
-       struct task_struct *tsk = current;
-
-#ifdef CONFIG_SH_FPU_EMU
-       unsigned short inst = 0;
-       int err;
-
-       get_user(inst, (unsigned short*)regs->pc);
-
-       err = do_fpu_inst(inst, regs);
-       if (!err) {
-               regs->pc += instruction_size(inst);
-               return;
-       }
-       /* not a FPU inst. */
-#endif
-
-#ifdef CONFIG_SH_DSP
-       /* Check if it's a DSP instruction */
-       if (is_dsp_inst(regs)) {
-               /* Enable DSP mode, and restart instruction. */
-               regs->sr |= SR_DSP;
-               return;
-       }
-#endif
-
-       lookup_exception_vector(error_code);
-
-       local_irq_enable();
-       CHK_REMOTE_DEBUG(regs);
-       force_sig(SIGILL, tsk);
-       die_if_no_fixup("reserved instruction", regs, error_code);
-}
-
-#ifdef CONFIG_SH_FPU_EMU
-static int emulate_branch(unsigned short inst, struct pt_regs* regs)
-{
-       /*
-        * bfs: 8fxx: PC+=d*2+4;
-        * bts: 8dxx: PC+=d*2+4;
-        * bra: axxx: PC+=D*2+4;
-        * bsr: bxxx: PC+=D*2+4  after PR=PC+4;
-        * braf:0x23: PC+=Rn*2+4;
-        * bsrf:0x03: PC+=Rn*2+4 after PR=PC+4;
-        * jmp: 4x2b: PC=Rn;
-        * jsr: 4x0b: PC=Rn      after PR=PC+4;
-        * rts: 000b: PC=PR;
-        */
-       if ((inst & 0xfd00) == 0x8d00) {
-               regs->pc += SH_PC_8BIT_OFFSET(inst);
-               return 0;
-       }
-
-       if ((inst & 0xe000) == 0xa000) {
-               regs->pc += SH_PC_12BIT_OFFSET(inst);
-               return 0;
-       }
-
-       if ((inst & 0xf0df) == 0x0003) {
-               regs->pc += regs->regs[(inst & 0x0f00) >> 8] + 4;
-               return 0;
-       }
-
-       if ((inst & 0xf0df) == 0x400b) {
-               regs->pc = regs->regs[(inst & 0x0f00) >> 8];
-               return 0;
-       }
-
-       if ((inst & 0xffff) == 0x000b) {
-               regs->pc = regs->pr;
-               return 0;
-       }
-
-       return 1;
-}
-#endif
-
-asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5,
-                               unsigned long r6, unsigned long r7,
-                               struct pt_regs __regs)
-{
-       struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
-       unsigned long error_code;
-       struct task_struct *tsk = current;
-#ifdef CONFIG_SH_FPU_EMU
-       unsigned short inst = 0;
-
-       get_user(inst, (unsigned short *)regs->pc + 1);
-       if (!do_fpu_inst(inst, regs)) {
-               get_user(inst, (unsigned short *)regs->pc);
-               if (!emulate_branch(inst, regs))
-                       return;
-               /* fault in branch.*/
-       }
-       /* not a FPU inst. */
-#endif
-
-       lookup_exception_vector(error_code);
-
-       local_irq_enable();
-       CHK_REMOTE_DEBUG(regs);
-       force_sig(SIGILL, tsk);
-       die_if_no_fixup("illegal slot instruction", regs, error_code);
-}
-
-asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
-                                  unsigned long r6, unsigned long r7,
-                                  struct pt_regs __regs)
-{
-       struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
-       long ex;
-
-       lookup_exception_vector(ex);
-       die_if_kernel("exception", regs, ex);
-}
-
-#if defined(CONFIG_SH_STANDARD_BIOS)
-void *gdb_vbr_vector;
-
-static inline void __init gdb_vbr_init(void)
-{
-       register unsigned long vbr;
-
-       /*
-        * Read the old value of the VBR register to initialise
-        * the vector through which debug and BIOS traps are
-        * delegated by the Linux trap handler.
-        */
-       asm volatile("stc vbr, %0" : "=r" (vbr));
-
-       gdb_vbr_vector = (void *)(vbr + 0x100);
-       printk("Setting GDB trap vector to 0x%08lx\n",
-              (unsigned long)gdb_vbr_vector);
-}
-#endif
-
-void __cpuinit per_cpu_trap_init(void)
-{
-       extern void *vbr_base;
-
-#ifdef CONFIG_SH_STANDARD_BIOS
-       if (raw_smp_processor_id() == 0)
-               gdb_vbr_init();
-#endif
-
-       /* NOTE: The VBR value should be at P1
-          (or P2, the virtual "fixed" address space).
-          It should definitely not be a physical address.  */
-
-       asm volatile("ldc       %0, vbr"
-                    : /* no output */
-                    : "r" (&vbr_base)
-                    : "memory");
-}
-
-void *set_exception_table_vec(unsigned int vec, void *handler)
-{
-       extern void *exception_handling_table[];
-       void *old_handler;
-
-       old_handler = exception_handling_table[vec];
-       exception_handling_table[vec] = handler;
-       return old_handler;
-}
-
-extern asmlinkage void address_error_handler(unsigned long r4, unsigned long r5,
-                                            unsigned long r6, unsigned long r7,
-                                            struct pt_regs __regs);
-
-void __init trap_init(void)
-{
-       set_exception_table_vec(TRAP_RESERVED_INST, do_reserved_inst);
-       set_exception_table_vec(TRAP_ILLEGAL_SLOT_INST, do_illegal_slot_inst);
-
-#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SH_FPU) || \
-    defined(CONFIG_SH_FPU_EMU)
-       /*
-        * For SH-4 lacking an FPU, treat floating point instructions as
-        * reserved. They'll be handled in the math-emu case, or faulted on
-        * otherwise.
-        */
-       set_exception_table_evt(0x800, do_reserved_inst);
-       set_exception_table_evt(0x820, do_illegal_slot_inst);
-#elif defined(CONFIG_SH_FPU)
-#ifdef CONFIG_CPU_SUBTYPE_SHX3
-       set_exception_table_evt(0xd80, do_fpu_state_restore);
-       set_exception_table_evt(0xda0, do_fpu_state_restore);
-#else
-       set_exception_table_evt(0x800, do_fpu_state_restore);
-       set_exception_table_evt(0x820, do_fpu_state_restore);
-#endif
-#endif
-
-#ifdef CONFIG_CPU_SH2
-       set_exception_table_vec(TRAP_ADDRESS_ERROR, address_error_handler);
-#endif
-#ifdef CONFIG_CPU_SH2A
-       set_exception_table_vec(TRAP_DIVZERO_ERROR, do_divide_error);
-       set_exception_table_vec(TRAP_DIVOVF_ERROR, do_divide_error);
-#endif
-
-       /* Setup VBR for boot cpu */
-       per_cpu_trap_init();
-}
-
-#ifdef CONFIG_BUG
-void handle_BUG(struct pt_regs *regs)
-{
-       enum bug_trap_type tt;
-       tt = report_bug(regs->pc, regs);
-       if (tt == BUG_TRAP_TYPE_WARN) {
-               regs->pc += 2;
-               return;
-       }
-
-       die("Kernel BUG", regs, TRAPA_BUG_OPCODE & 0xff);
-}
-
-int is_valid_bugaddr(unsigned long addr)
-{
-       return addr >= PAGE_OFFSET;
-}
-#endif
-
-void show_trace(struct task_struct *tsk, unsigned long *sp,
-               struct pt_regs *regs)
-{
-       unsigned long addr;
-
-       if (regs && user_mode(regs))
-               return;
-
-       printk("\nCall trace: ");
-#ifdef CONFIG_KALLSYMS
-       printk("\n");
-#endif
-
-       while (!kstack_end(sp)) {
-               addr = *sp++;
-               if (kernel_text_address(addr))
-                       print_ip_sym(addr);
-       }
-
-       printk("\n");
-
-       if (!tsk)
-               tsk = current;
-
-       debug_show_held_locks(tsk);
-}
-
-void show_stack(struct task_struct *tsk, unsigned long *sp)
-{
-       unsigned long stack;
-
-       if (!tsk)
-               tsk = current;
-       if (tsk == current)
-               sp = (unsigned long *)current_stack_pointer;
-       else
-               sp = (unsigned long *)tsk->thread.sp;
-
-       stack = (unsigned long)sp;
-       dump_mem("Stack: ", stack, THREAD_SIZE +
-                (unsigned long)task_stack_page(tsk));
-       show_trace(tsk, sp, NULL);
-}
-
-void dump_stack(void)
-{
-       show_stack(NULL, NULL);
-}
-EXPORT_SYMBOL(dump_stack);
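
For reference, the SH_PC_8BIT_OFFSET() and SH_PC_12BIT_OFFSET() macros above recover a signed branch displacement from the low bits of the opcode and scale it to a byte offset (displacements count 16-bit instructions, plus 4 for the delayed-branch PC). A minimal stand-alone sketch of the arithmetic, runnable in user space; it assumes the usual two's-complement truncation and arithmetic right shift of signed values, as on GCC targets:

#include <assert.h>

#define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4)
#define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4)

int main(void)
{
	/* bt 0x7f: maximum forward 8-bit displacement, 127 * 2 + 4 */
	assert(SH_PC_8BIT_OFFSET(0x897f) == 258);
	/* bf 0x80: displacement -128, so -128 * 2 + 4 */
	assert(SH_PC_8BIT_OFFSET(0x8b80) == -252);
	/* bra 0x001: 12-bit displacement 1, so 1 * 2 + 4 */
	assert(SH_PC_12BIT_OFFSET(0xa001) == 6);
	/* bra 0x800: 12-bit displacement -2048, so -2048 * 2 + 4 */
	assert(SH_PC_12BIT_OFFSET(0xa800) == -4092);
	return 0;
}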
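
Similarly, the 16-bit load cases above (the mov.w forms of cases 0, 6, and 8) zero a 32-bit register image, copy in two bytes, and then sign-extend by hand, painting the upper bytes with 0xff when bit 15 is set, since copy_from_user() cannot sign-extend. A little-endian sketch of that fixup (hypothetical helper name; memcpy() stands in for the user-space copy), mirroring the __LITTLE_ENDIAN__ branch:

#include <assert.h>
#include <stdint.h>
#include <string.h>

static uint32_t emulate_movw_load_le(const void *src)
{
	uint32_t reg = 0;                 /* register image, pre-zeroed */
	unsigned char *dst = (unsigned char *)&reg;

	memcpy(dst, src, 2);              /* stands in for copy_from_user() */
	if (dst[1] & 0x80) {              /* sign bit of the 16-bit value set? */
		dst[2] = 0xff;            /* extend the sign into ... */
		dst[3] = 0xff;            /* ... the upper two bytes */
	}
	return reg;
}

int main(void)
{
	uint16_t neg = 0x8001, pos = 0x7fff;

	assert(emulate_movw_load_le(&neg) == 0xffff8001u);
	assert(emulate_movw_load_le(&pos) == 0x00007fffu);
	return 0;
}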
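
Finally, die_if_no_fixup() above leans on the kernel's exception tables: when a kernel-mode fault lands on a whitelisted instruction, regs->pc is redirected to the recorded fixup address instead of the machine dying. A toy illustration of the lookup, with the same sorted-array-plus-binary-search shape as search_exception_tables() (the entry values here are made up):

#include <assert.h>
#include <stddef.h>

struct exception_table_entry {
	unsigned long insn;   /* address that is allowed to fault */
	unsigned long fixup;  /* where to resume instead */
};

/* binary search over entries sorted by insn, as the kernel's version does */
static const struct exception_table_entry *
search_extable(const struct exception_table_entry *tbl, size_t n,
	       unsigned long pc)
{
	size_t lo = 0, hi = n;

	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;

		if (tbl[mid].insn == pc)
			return &tbl[mid];
		if (tbl[mid].insn < pc)
			lo = mid + 1;
		else
			hi = mid;
	}
	return NULL;   /* no fixup: a kernel-mode fault here is fatal */
}

int main(void)
{
	static const struct exception_table_entry tbl[] = {
		{ 0x1000, 0x1100 },
		{ 0x2000, 0x2100 },
	};

	assert(search_extable(tbl, 2, 0x2000)->fixup == 0x2100);
	assert(search_extable(tbl, 2, 0x3000) == NULL);
	return 0;
}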
diff --git a/arch/sh/kernel/traps_32.c b/arch/sh/kernel/traps_32.c
new file mode 100644
index 0000000..cf99111
--- /dev/null
+++ b/arch/sh/kernel/traps_32.c
@@ -0,0 +1,947 @@
+/*
+ * 'traps.c' handles hardware traps and faults after we have saved some
+ * state in 'entry.S'.
+ *
+ *  SuperH version: Copyright (C) 1999 Niibe Yutaka
+ *                  Copyright (C) 2000 Philipp Rumpf
+ *                  Copyright (C) 2000 David Howells
+ *                  Copyright (C) 2002 - 2007 Paul Mundt
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+#include <linux/io.h>
+#include <linux/bug.h>
+#include <linux/debug_locks.h>
+#include <linux/kdebug.h>
+#include <linux/kexec.h>
+#include <linux/limits.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+
+#ifdef CONFIG_SH_KGDB
+#include <asm/kgdb.h>
+#define CHK_REMOTE_DEBUG(regs)                 \
+{                                              \
+       if (kgdb_debug_hook && !user_mode(regs))\
+               (*kgdb_debug_hook)(regs);       \
+}
+#else
+#define CHK_REMOTE_DEBUG(regs)
+#endif
+
+#ifdef CONFIG_CPU_SH2
+# define TRAP_RESERVED_INST    4
+# define TRAP_ILLEGAL_SLOT_INST        6
+# define TRAP_ADDRESS_ERROR    9
+# ifdef CONFIG_CPU_SH2A
+#  define TRAP_DIVZERO_ERROR   17
+#  define TRAP_DIVOVF_ERROR    18
+# endif
+#else
+#define TRAP_RESERVED_INST     12
+#define TRAP_ILLEGAL_SLOT_INST 13
+#endif
+
+static void dump_mem(const char *str, unsigned long bottom, unsigned long top)
+{
+       unsigned long p;
+       int i;
+
+       printk("%s(0x%08lx to 0x%08lx)\n", str, bottom, top);
+
+       for (p = bottom & ~31; p < top; ) {
+               printk("%04lx: ", p & 0xffff);
+
+               for (i = 0; i < 8; i++, p += 4) {
+                       unsigned int val;
+
+                       if (p < bottom || p >= top)
+                               printk("         ");
+                       else {
+                               if (__get_user(val, (unsigned int __user *)p)) {
+                                       printk("\n");
+                                       return;
+                               }
+                               printk("%08x ", val);
+                       }
+               }
+               printk("\n");
+       }
+}
+
+static DEFINE_SPINLOCK(die_lock);
+
+void die(const char * str, struct pt_regs * regs, long err)
+{
+       static int die_counter;
+
+       oops_enter();
+
+       console_verbose();
+       spin_lock_irq(&die_lock);
+       bust_spinlocks(1);
+
+       printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
+
+       CHK_REMOTE_DEBUG(regs);
+       print_modules();
+       show_regs(regs);
+
+       printk("Process: %s (pid: %d, stack limit = %p)\n", current->comm,
+                       task_pid_nr(current), task_stack_page(current) + 1);
+
+       if (!user_mode(regs) || in_interrupt())
+               dump_mem("Stack: ", regs->regs[15], THREAD_SIZE +
+                        (unsigned long)task_stack_page(current));
+
+       bust_spinlocks(0);
+       add_taint(TAINT_DIE);
+       spin_unlock_irq(&die_lock);
+
+       if (kexec_should_crash(current))
+               crash_kexec(regs);
+
+       if (in_interrupt())
+               panic("Fatal exception in interrupt");
+
+       if (panic_on_oops)
+               panic("Fatal exception");
+
+       oops_exit();
+       do_exit(SIGSEGV);
+}
+
+static inline void die_if_kernel(const char *str, struct pt_regs *regs,
+                                long err)
+{
+       if (!user_mode(regs))
+               die(str, regs, err);
+}
+
+/*
+ * try and fix up kernelspace address errors
+ * - userspace errors just cause EFAULT to be returned, resulting in SEGV
+ * - kernel/userspace interfaces cause a jump to an appropriate handler
+ * - other kernel errors are bad
+ * - return 0 if fixed-up, -EFAULT if non-fatal (to the kernel) fault
+ */
+static int die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
+{
+       if (!user_mode(regs)) {
+               const struct exception_table_entry *fixup;
+               fixup = search_exception_tables(regs->pc);
+               if (fixup) {
+                       regs->pc = fixup->fixup;
+                       return 0;
+               }
+               die(str, regs, err);
+       }
+       return -EFAULT;
+}
+
+/*
+ * handle an instruction that does an unaligned memory access by emulating the
+ * desired behaviour
+ * - note that PC _may not_ point to the faulting instruction
+ *   (if that instruction is in a branch delay slot)
+ * - return 0 if emulation okay, -EFAULT on existential error
+ */
+static int handle_unaligned_ins(u16 instruction, struct pt_regs *regs)
+{
+       int ret, index, count;
+       unsigned long *rm, *rn;
+       unsigned char *src, *dst;
+
+       index = (instruction>>8)&15;    /* 0x0F00 */
+       rn = &regs->regs[index];
+
+       index = (instruction>>4)&15;    /* 0x00F0 */
+       rm = &regs->regs[index];
+
+       count = 1<<(instruction&3);
+
+       ret = -EFAULT;
+       switch (instruction>>12) {
+       case 0: /* mov.[bwl] to/from memory via r0+rn */
+               if (instruction & 8) {
+                       /* from memory */
+                       src = (unsigned char*) *rm;
+                       src += regs->regs[0];
+                       dst = (unsigned char*) rn;
+                       *(unsigned long*)dst = 0;
+
+#ifdef __LITTLE_ENDIAN__
+                       if (copy_from_user(dst, src, count))
+                               goto fetch_fault;
+
+                       if ((count == 2) && dst[1] & 0x80) {
+                               dst[2] = 0xff;
+                               dst[3] = 0xff;
+                       }
+#else
+                       dst += 4-count;
+
+                       if (__copy_user(dst, src, count))
+                               goto fetch_fault;
+
+                       if ((count == 2) && dst[2] & 0x80) {
+                               dst[0] = 0xff;
+                               dst[1] = 0xff;
+                       }
+#endif
+               } else {
+                       /* to memory */
+                       src = (unsigned char*) rm;
+#if !defined(__LITTLE_ENDIAN__)
+                       src += 4-count;
+#endif
+                       dst = (unsigned char*) *rn;
+                       dst += regs->regs[0];
+
+                       if (copy_to_user(dst, src, count))
+                               goto fetch_fault;
+               }
+               ret = 0;
+               break;
+
+       case 1: /* mov.l Rm,@(disp,Rn) */
+               src = (unsigned char*) rm;
+               dst = (unsigned char*) *rn;
+               dst += (instruction&0x000F)<<2;
+
+               if (copy_to_user(dst,src,4))
+                       goto fetch_fault;
+               ret = 0;
+               break;
+
+       case 2: /* mov.[bwl] to memory, possibly with pre-decrement */
+               if (instruction & 4)
+                       *rn -= count;
+               src = (unsigned char*) rm;
+               dst = (unsigned char*) *rn;
+#if !defined(__LITTLE_ENDIAN__)
+               src += 4-count;
+#endif
+               if (copy_to_user(dst, src, count))
+                       goto fetch_fault;
+               ret = 0;
+               break;
+
+       case 5: /* mov.l @(disp,Rm),Rn */
+               src = (unsigned char*) *rm;
+               src += (instruction&0x000F)<<2;
+               dst = (unsigned char*) rn;
+               *(unsigned long*)dst = 0;
+
+               if (copy_from_user(dst,src,4))
+                       goto fetch_fault;
+               ret = 0;
+               break;
+
+       case 6: /* mov.[bwl] from memory, possibly with post-increment */
+               src = (unsigned char*) *rm;
+               if (instruction & 4)
+                       *rm += count;
+               dst = (unsigned char*) rn;
+               *(unsigned long*)dst = 0;
+
+#ifdef __LITTLE_ENDIAN__
+               if (copy_from_user(dst, src, count))
+                       goto fetch_fault;
+
+               if ((count == 2) && dst[1] & 0x80) {
+                       dst[2] = 0xff;
+                       dst[3] = 0xff;
+               }
+#else
+               dst += 4-count;
+
+               if (copy_from_user(dst, src, count))
+                       goto fetch_fault;
+
+               if ((count == 2) && dst[2] & 0x80) {
+                       dst[0] = 0xff;
+                       dst[1] = 0xff;
+               }
+#endif
+               ret = 0;
+               break;
+
+       case 8:
+               switch ((instruction&0xFF00)>>8) {
+               case 0x81: /* mov.w R0,@(disp,Rn) */
+                       src = (unsigned char*) &regs->regs[0];
+#if !defined(__LITTLE_ENDIAN__)
+                       src += 2;
+#endif
+                       dst = (unsigned char*) *rm; /* called Rn in the spec */
+                       dst += (instruction&0x000F)<<1;
+
+                       if (copy_to_user(dst, src, 2))
+                               goto fetch_fault;
+                       ret = 0;
+                       break;
+
+               case 0x85: /* mov.w @(disp,Rm),R0 */
+                       src = (unsigned char*) *rm;
+                       src += (instruction&0x000F)<<1;
+                       dst = (unsigned char*) &regs->regs[0];
+                       *(unsigned long*)dst = 0;
+
+#if !defined(__LITTLE_ENDIAN__)
+                       dst += 2;
+#endif
+
+                       if (copy_from_user(dst, src, 2))
+                               goto fetch_fault;
+
+#ifdef __LITTLE_ENDIAN__
+                       if (dst[1] & 0x80) {
+                               dst[2] = 0xff;
+                               dst[3] = 0xff;
+                       }
+#else
+                       if (dst[2] & 0x80) {
+                               dst[0] = 0xff;
+                               dst[1] = 0xff;
+                       }
+#endif
+                       ret = 0;
+                       break;
+               }
+               break;
+       }
+       return ret;
+
+ fetch_fault:
+       /* Argh. Address not only misaligned but also non-existent.
+        * Raise an EFAULT and see if it's trapped
+        */
+       return die_if_no_fixup("Fault in unaligned fixup", regs, 0);
+}
+
+/*
+ * emulate the instruction in the delay slot
+ * - fetches the instruction from PC+2
+ */
+static inline int handle_unaligned_delayslot(struct pt_regs *regs)
+{
+       u16 instruction;
+
+       if (copy_from_user(&instruction, (u16 *)(regs->pc+2), 2)) {
+               /* the instruction-fetch faulted */
+               if (user_mode(regs))
+                       return -EFAULT;
+
+               /* kernel */
+               die("delay-slot-insn faulting in handle_unaligned_delayslot",
+                   regs, 0);
+       }
+
+       return handle_unaligned_ins(instruction,regs);
+}
+
+/*
+ * handle an instruction that does an unaligned memory access
+ * - have to be careful of branch delay-slot instructions that fault
+ *  SH3:
+ *   - if the branch would be taken PC points to the branch
+ *   - if the branch would not be taken, PC points to delay-slot
+ *  SH4:
+ *   - PC always points to delayed branch
+ * - return 0 if handled, -EFAULT if failed (may not return if in kernel)
+ */
+
+/* Macros to determine offset from current PC for branch instructions */
+/* Explicit type coercion is used to force sign extension where needed */
+#define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4)
+#define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4)
+
+/*
+ * XXX: SH-2A needs this too, but it needs an overhaul thanks to mixed 32-bit
+ * opcodes..
+ */
+#ifndef CONFIG_CPU_SH2A
+static int handle_unaligned_notify_count = 10;
+
+static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
+{
+       u_int rm;
+       int ret, index;
+
+       index = (instruction>>8)&15;    /* 0x0F00 */
+       rm = regs->regs[index];
+
+       /* shout about the first ten userspace fixups */
+       if (user_mode(regs) && handle_unaligned_notify_count>0) {
+               handle_unaligned_notify_count--;
+
+               printk(KERN_NOTICE "Fixing up unaligned userspace access "
+                      "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
+                      current->comm, task_pid_nr(current),
+                      (u16 *)regs->pc, instruction);
+       }
+
+       ret = -EFAULT;
+       switch (instruction&0xF000) {
+       case 0x0000:
+               if (instruction==0x000B) {
+                       /* rts */
+                       ret = handle_unaligned_delayslot(regs);
+                       if (ret==0)
+                               regs->pc = regs->pr;
+               }
+               else if ((instruction&0x00FF)==0x0023) {
+                       /* braf @Rm */
+                       ret = handle_unaligned_delayslot(regs);
+                       if (ret==0)
+                               regs->pc += rm + 4;
+               }
+               else if ((instruction&0x00FF)==0x0003) {
+                       /* bsrf @Rm */
+                       ret = handle_unaligned_delayslot(regs);
+                       if (ret==0) {
+                               regs->pr = regs->pc + 4;
+                               regs->pc += rm + 4;
+                       }
+               }
+               else {
+                       /* mov.[bwl] to/from memory via r0+rn */
+                       goto simple;
+               }
+               break;
+
+       case 0x1000: /* mov.l Rm,@(disp,Rn) */
+               goto simple;
+
+       case 0x2000: /* mov.[bwl] to memory, possibly with pre-decrement */
+               goto simple;
+
+       case 0x4000:
+               if ((instruction&0x00FF)==0x002B) {
+                       /* jmp @Rm */
+                       ret = handle_unaligned_delayslot(regs);
+                       if (ret==0)
+                               regs->pc = rm;
+               }
+               else if ((instruction&0x00FF)==0x000B) {
+                       /* jsr @Rm */
+                       ret = handle_unaligned_delayslot(regs);
+                       if (ret==0) {
+                               regs->pr = regs->pc + 4;
+                               regs->pc = rm;
+                       }
+               }
+               else {
+                       /* mov.[bwl] to/from memory via r0+rn */
+                       goto simple;
+               }
+               break;
+
+       case 0x5000: /* mov.l @(disp,Rm),Rn */
+               goto simple;
+
+       case 0x6000: /* mov.[bwl] from memory, possibly with post-increment */
+               goto simple;
+
+       case 0x8000: /* bf lab, bf/s lab, bt lab, bt/s lab */
+               switch (instruction&0x0F00) {
+               case 0x0100: /* mov.w R0,@(disp,Rm) */
+                       goto simple;
+               case 0x0500: /* mov.w @(disp,Rm),R0 */
+                       goto simple;
+               case 0x0B00: /* bf   lab - no delayslot*/
+                       break;
+               case 0x0F00: /* bf/s lab */
+                       ret = handle_unaligned_delayslot(regs);
+                       if (ret==0) {
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
+                               if ((regs->sr & 0x00000001) != 0)
+                                       regs->pc += 4; /* next after slot */
+                               else
+#endif
+                                       regs->pc += SH_PC_8BIT_OFFSET(instruction);
+                       }
+                       break;
+               case 0x0900: /* bt   lab - no delayslot */
+                       break;
+               case 0x0D00: /* bt/s lab */
+                       ret = handle_unaligned_delayslot(regs);
+                       if (ret==0) {
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
+                               if ((regs->sr & 0x00000001) == 0)
+                                       regs->pc += 4; /* next after slot */
+                               else
+#endif
+                                       regs->pc += SH_PC_8BIT_OFFSET(instruction);
+                       }
+                       break;
+               }
+               break;
+
+       case 0xA000: /* bra label */
+               ret = handle_unaligned_delayslot(regs);
+               if (ret==0)
+                       regs->pc += SH_PC_12BIT_OFFSET(instruction);
+               break;
+
+       case 0xB000: /* bsr label */
+               ret = handle_unaligned_delayslot(regs);
+               if (ret==0) {
+                       regs->pr = regs->pc + 4;
+                       regs->pc += SH_PC_12BIT_OFFSET(instruction);
+               }
+               break;
+       }
+       return ret;
+
+       /* handle non-delay-slot instruction */
+ simple:
+       ret = handle_unaligned_ins(instruction,regs);
+       if (ret==0)
+               regs->pc += instruction_size(instruction);
+       return ret;
+}
+#endif /* CONFIG_CPU_SH2A */
+
+#ifdef CONFIG_CPU_HAS_SR_RB
+#define lookup_exception_vector(x)     \
+       __asm__ __volatile__ ("stc r2_bank, %0\n\t" : "=r" ((x)))
+#else
+#define lookup_exception_vector(x)     \
+       __asm__ __volatile__ ("mov r4, %0\n\t" : "=r" ((x)))
+#endif
+
+/*
+ * Handle various address error exceptions:
+ *  - instruction address error:
+ *       misaligned PC
+ *       PC >= 0x80000000 in user mode
+ *  - data address error (read and write)
+ *       misaligned data access
+ *       access to >= 0x80000000 in user mode
+ * Unfortunately we can't distinguish between instruction address errors
+ * and data address errors caused by read accesses.
+ */
+asmlinkage void do_address_error(struct pt_regs *regs,
+                                unsigned long writeaccess,
+                                unsigned long address)
+{
+       unsigned long error_code = 0;
+       mm_segment_t oldfs;
+       siginfo_t info;
+#ifndef CONFIG_CPU_SH2A
+       u16 instruction;
+       int tmp;
+#endif
+
+       /* Intentional ifdef */
+#ifdef CONFIG_CPU_HAS_SR_RB
+       lookup_exception_vector(error_code);
+#endif
+
+       oldfs = get_fs();
+
+       if (user_mode(regs)) {
+               int si_code = BUS_ADRERR;
+
+               local_irq_enable();
+
+               /* bad PC is not something we can fix */
+               if (regs->pc & 1) {
+                       si_code = BUS_ADRALN;
+                       goto uspace_segv;
+               }
+
+#ifndef CONFIG_CPU_SH2A
+               set_fs(USER_DS);
+               if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
+                       /* Argh. Fault on the instruction itself.
+                          This should never happen on non-SMP systems.
+                       */
+                       set_fs(oldfs);
+                       goto uspace_segv;
+               }
+
+               tmp = handle_unaligned_access(instruction, regs);
+               set_fs(oldfs);
+
+               if (tmp==0)
+                       return; /* sorted */
+#endif
+
+uspace_segv:
+               printk(KERN_NOTICE "Sending SIGBUS to \"%s\" due to unaligned "
+                      "access (PC %lx PR %lx)\n", current->comm, regs->pc,
+                      regs->pr);
+
+               info.si_signo = SIGBUS;
+               info.si_errno = 0;
+               info.si_code = si_code;
+               info.si_addr = (void __user *)address;
+               force_sig_info(SIGBUS, &info, current);
+       } else {
+               if (regs->pc & 1)
+                       die("unaligned program counter", regs, error_code);
+
+#ifndef CONFIG_CPU_SH2A
+               set_fs(KERNEL_DS);
+               if (copy_from_user(&instruction, (u16 *)(regs->pc), 2)) {
+                       /* Argh. Fault on the instruction itself.
+                          This should never happen on non-SMP systems.
+                       */
+                       set_fs(oldfs);
+                       die("insn faulting in do_address_error", regs, 0);
+               }
+
+               handle_unaligned_access(instruction, regs);
+               set_fs(oldfs);
+#else
+               printk(KERN_NOTICE "Killing process \"%s\" due to unaligned "
+                      "access\n", current->comm);
+
+               force_sig(SIGSEGV, current);
+#endif
+       }
+}
+
+#ifdef CONFIG_SH_DSP
+/*
+ *     SH-DSP support by gerg@snapgear.com.
+ */
+int is_dsp_inst(struct pt_regs *regs)
+{
+       unsigned short inst = 0;
+
+       /*
+        * Safe guard if DSP mode is already enabled or we're lacking
+        * the DSP altogether.
+        */
+       if (!(current_cpu_data.flags & CPU_HAS_DSP) || (regs->sr & SR_DSP))
+               return 0;
+
+       get_user(inst, ((unsigned short *) regs->pc));
+
+       inst &= 0xf000;
+
+       /* Check for any type of DSP or support instruction */
+       if ((inst == 0xf000) || (inst == 0x4000))
+               return 1;
+
+       return 0;
+}
+#else
+#define is_dsp_inst(regs)      (0)
+#endif /* CONFIG_SH_DSP */
+
+#ifdef CONFIG_CPU_SH2A
+asmlinkage void do_divide_error(unsigned long r4, unsigned long r5,
+                               unsigned long r6, unsigned long r7,
+                               struct pt_regs __regs)
+{
+       siginfo_t info;
+
+       switch (r4) {
+       case TRAP_DIVZERO_ERROR:
+               info.si_code = FPE_INTDIV;
+               break;
+       case TRAP_DIVOVF_ERROR:
+               info.si_code = FPE_INTOVF;
+               break;
+       }
+
+       force_sig_info(SIGFPE, &info, current);
+}
+#endif
+
+/* arch/sh/kernel/cpu/sh4/fpu.c */
+extern int do_fpu_inst(unsigned short, struct pt_regs *);
+extern asmlinkage void do_fpu_state_restore(unsigned long r4, unsigned long r5,
+               unsigned long r6, unsigned long r7, struct pt_regs __regs);
+
+asmlinkage void do_reserved_inst(unsigned long r4, unsigned long r5,
+                               unsigned long r6, unsigned long r7,
+                               struct pt_regs __regs)
+{
+       struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+       unsigned long error_code;
+       struct task_struct *tsk = current;
+
+#ifdef CONFIG_SH_FPU_EMU
+       unsigned short inst = 0;
+       int err;
+
+       get_user(inst, (unsigned short*)regs->pc);
+
+       err = do_fpu_inst(inst, regs);
+       if (!err) {
+               regs->pc += instruction_size(inst);
+               return;
+       }
+       /* not a FPU inst. */
+#endif
+
+#ifdef CONFIG_SH_DSP
+       /* Check if it's a DSP instruction */
+       if (is_dsp_inst(regs)) {
+               /* Enable DSP mode, and restart instruction. */
+               regs->sr |= SR_DSP;
+               return;
+       }
+#endif
+
+       lookup_exception_vector(error_code);
+
+       local_irq_enable();
+       CHK_REMOTE_DEBUG(regs);
+       force_sig(SIGILL, tsk);
+       die_if_no_fixup("reserved instruction", regs, error_code);
+}
+
+#ifdef CONFIG_SH_FPU_EMU
+static int emulate_branch(unsigned short inst, struct pt_regs* regs)
+{
+       /*
+        * bfs: 8fxx: PC+=d*2+4;
+        * bts: 8dxx: PC+=d*2+4;
+        * bra: axxx: PC+=D*2+4;
+        * bsr: bxxx: PC+=D*2+4  after PR=PC+4;
+        * braf:0x23: PC+=Rn*2+4;
+        * bsrf:0x03: PC+=Rn*2+4 after PR=PC+4;
+        * jmp: 4x2b: PC=Rn;
+        * jsr: 4x0b: PC=Rn      after PR=PC+4;
+        * rts: 000b: PC=PR;
+        */
+       if ((inst & 0xfd00) == 0x8d00) {
+               regs->pc += SH_PC_8BIT_OFFSET(inst);
+               return 0;
+       }
+
+       if ((inst & 0xe000) == 0xa000) {
+               regs->pc += SH_PC_12BIT_OFFSET(inst);
+               return 0;
+       }
+
+       if ((inst & 0xf0df) == 0x0003) {
+               regs->pc += regs->regs[(inst & 0x0f00) >> 8] + 4;
+               return 0;
+       }
+
+       if ((inst & 0xf0df) == 0x400b) {
+               regs->pc = regs->regs[(inst & 0x0f00) >> 8];
+               return 0;
+       }
+
+       if ((inst & 0xffff) == 0x000b) {
+               regs->pc = regs->pr;
+               return 0;
+       }
+
+       return 1;
+}
+#endif
+
+asmlinkage void do_illegal_slot_inst(unsigned long r4, unsigned long r5,
+                               unsigned long r6, unsigned long r7,
+                               struct pt_regs __regs)
+{
+       struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+       unsigned long error_code;
+       struct task_struct *tsk = current;
+#ifdef CONFIG_SH_FPU_EMU
+       unsigned short inst = 0;
+
+       get_user(inst, (unsigned short *)regs->pc + 1);
+       if (!do_fpu_inst(inst, regs)) {
+               get_user(inst, (unsigned short *)regs->pc);
+               if (!emulate_branch(inst, regs))
+                       return;
+               /* fault in branch.*/
+       }
+       /* not a FPU inst. */
+#endif
+
+       lookup_exception_vector(error_code);
+
+       local_irq_enable();
+       CHK_REMOTE_DEBUG(regs);
+       force_sig(SIGILL, tsk);
+       die_if_no_fixup("illegal slot instruction", regs, error_code);
+}
+
+asmlinkage void do_exception_error(unsigned long r4, unsigned long r5,
+                                  unsigned long r6, unsigned long r7,
+                                  struct pt_regs __regs)
+{
+       struct pt_regs *regs = RELOC_HIDE(&__regs, 0);
+       long ex;
+
+       lookup_exception_vector(ex);
+       die_if_kernel("exception", regs, ex);
+}
+
+#if defined(CONFIG_SH_STANDARD_BIOS)
+void *gdb_vbr_vector;
+
+static inline void __init gdb_vbr_init(void)
+{
+       register unsigned long vbr;
+
+       /*
+        * Read the old value of the VBR register to initialise
+        * the vector through which debug and BIOS traps are
+        * delegated by the Linux trap handler.
+        */
+       asm volatile("stc vbr, %0" : "=r" (vbr));
+
+       gdb_vbr_vector = (void *)(vbr + 0x100);
+       printk("Setting GDB trap vector to 0x%08lx\n",
+              (unsigned long)gdb_vbr_vector);
+}
+#endif
+
+void __cpuinit per_cpu_trap_init(void)
+{
+       extern void *vbr_base;
+
+#ifdef CONFIG_SH_STANDARD_BIOS
+       if (raw_smp_processor_id() == 0)
+               gdb_vbr_init();
+#endif
+
+       /* NOTE: The VBR value should be at P1
+          (or P2, the virtual "fixed" address space).
+          It definitely should not be a physical address.  */
+
+       asm volatile("ldc       %0, vbr"
+                    : /* no output */
+                    : "r" (&vbr_base)
+                    : "memory");
+}
+
+void *set_exception_table_vec(unsigned int vec, void *handler)
+{
+       extern void *exception_handling_table[];
+       void *old_handler;
+
+       old_handler = exception_handling_table[vec];
+       exception_handling_table[vec] = handler;
+       return old_handler;
+}
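+
+/*
+ * Illustrative sketch, not part of this patch: a caller could temporarily
+ * steal a vector and restore the previous handler on teardown.  The names
+ * example_trap/example_vector_swap are hypothetical.
+ */
+#if 0
+asmlinkage void example_trap(unsigned long r4, unsigned long r5,
+                            unsigned long r6, unsigned long r7,
+                            struct pt_regs __regs)
+{
+       /* inspect or fix up state here */
+}
+
+static void example_vector_swap(void)
+{
+       void *old = set_exception_table_vec(TRAP_RESERVED_INST, example_trap);
+       /* ... run with the replacement handler installed ... */
+       set_exception_table_vec(TRAP_RESERVED_INST, old);
+}
+#endif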
+
+extern asmlinkage void address_error_handler(unsigned long r4, unsigned long r5,
+                                            unsigned long r6, unsigned long r7,
+                                            struct pt_regs __regs);
+
+void __init trap_init(void)
+{
+       set_exception_table_vec(TRAP_RESERVED_INST, do_reserved_inst);
+       set_exception_table_vec(TRAP_ILLEGAL_SLOT_INST, do_illegal_slot_inst);
+
+#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SH_FPU) || \
+    defined(CONFIG_SH_FPU_EMU)
+       /*
+        * For SH-4 lacking an FPU, treat floating point instructions as
+        * reserved. They'll be handled in the math-emu case, or faulted on
+        * otherwise.
+        */
+       set_exception_table_evt(0x800, do_reserved_inst);
+       set_exception_table_evt(0x820, do_illegal_slot_inst);
+#elif defined(CONFIG_SH_FPU)
+#ifdef CONFIG_CPU_SUBTYPE_SHX3
+       set_exception_table_evt(0xd80, do_fpu_state_restore);
+       set_exception_table_evt(0xda0, do_fpu_state_restore);
+#else
+       set_exception_table_evt(0x800, do_fpu_state_restore);
+       set_exception_table_evt(0x820, do_fpu_state_restore);
+#endif
+#endif
+
+#ifdef CONFIG_CPU_SH2
+       set_exception_table_vec(TRAP_ADDRESS_ERROR, address_error_handler);
+#endif
+#ifdef CONFIG_CPU_SH2A
+       set_exception_table_vec(TRAP_DIVZERO_ERROR, do_divide_error);
+       set_exception_table_vec(TRAP_DIVOVF_ERROR, do_divide_error);
+#endif
+
+       /* Setup VBR for boot cpu */
+       per_cpu_trap_init();
+}
+
+#ifdef CONFIG_BUG
+void handle_BUG(struct pt_regs *regs)
+{
+       enum bug_trap_type tt;
+       tt = report_bug(regs->pc, regs);
+       if (tt == BUG_TRAP_TYPE_WARN) {
+               regs->pc += 2;
+               return;
+       }
+
+       die("Kernel BUG", regs, TRAPA_BUG_OPCODE & 0xff);
+}
+
+int is_valid_bugaddr(unsigned long addr)
+{
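+       /* Kernel text lives at or above PAGE_OFFSET; anything below is
+          user space and cannot hold a valid BUG table entry. */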
+       return addr >= PAGE_OFFSET;
+}
+#endif
+
+void show_trace(struct task_struct *tsk, unsigned long *sp,
+               struct pt_regs *regs)
+{
+       unsigned long addr;
+
+       if (regs && user_mode(regs))
+               return;
+
+       printk("\nCall trace: ");
+#ifdef CONFIG_KALLSYMS
+       printk("\n");
+#endif
+
+       while (!kstack_end(sp)) {
+               addr = *sp++;
+               if (kernel_text_address(addr))
+                       print_ip_sym(addr);
+       }
+
+       printk("\n");
+
+       if (!tsk)
+               tsk = current;
+
+       debug_show_held_locks(tsk);
+}
+
+void show_stack(struct task_struct *tsk, unsigned long *sp)
+{
+       unsigned long stack;
+
+       if (!tsk)
+               tsk = current;
+       if (tsk == current)
+               sp = (unsigned long *)current_stack_pointer;
+       else
+               sp = (unsigned long *)tsk->thread.sp;
+
+       stack = (unsigned long)sp;
+       dump_mem("Stack: ", stack, THREAD_SIZE +
+                (unsigned long)task_stack_page(tsk));
+       show_trace(tsk, sp, NULL);
+}
+
+void dump_stack(void)
+{
+       show_stack(NULL, NULL);
+}
+EXPORT_SYMBOL(dump_stack);
diff --git a/arch/sh/kernel/traps_64.c b/arch/sh/kernel/traps_64.c
new file mode 100644 (file)
index 0000000..742ce18
--- /dev/null
@@ -0,0 +1,981 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * arch/sh64/kernel/traps.c
+ *
+ * Copyright (C) 2000, 2001  Paolo Alberelli
+ * Copyright (C) 2003, 2004  Paul Mundt
+ * Copyright (C) 2003, 2004  Richard Curnow
+ *
+ */
+
+/*
+ * 'Traps.c' handles hardware traps and faults after we have saved some
+ * state in 'entry.S'.
+ */
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/timer.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/kallsyms.h>
+#include <linux/interrupt.h>
+#include <linux/sysctl.h>
+#include <linux/module.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/atomic.h>
+#include <asm/processor.h>
+#include <asm/pgtable.h>
+
+#undef DEBUG_EXCEPTION
+#ifdef DEBUG_EXCEPTION
+/* implemented in ../lib/dbg.c */
+extern void show_excp_regs(char *fname, int trapnr, int signr,
+                          struct pt_regs *regs);
+#else
+#define show_excp_regs(a, b, c, d)
+#endif
+
+static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name,
+               unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk);
+
+#define DO_ERROR(trapnr, signr, str, name, tsk) \
+asmlinkage void do_##name(unsigned long error_code, struct pt_regs *regs) \
+{ \
+       do_unhandled_exception(trapnr, signr, str, __stringify(name), error_code, regs, current); \
+}
+
+spinlock_t die_lock;
+
+void die(const char * str, struct pt_regs * regs, long err)
+{
+       console_verbose();
+       spin_lock_irq(&die_lock);
+       printk("%s: %lx\n", str, (err & 0xffffff));
+       show_regs(regs);
+       spin_unlock_irq(&die_lock);
+       do_exit(SIGSEGV);
+}
+
+static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
+{
+       if (!user_mode(regs))
+               die(str, regs, err);
+}
+
+static void die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
+{
+       if (!user_mode(regs)) {
+               const struct exception_table_entry *fixup;
+               fixup = search_exception_tables(regs->pc);
+               if (fixup) {
+                       regs->pc = fixup->fixup;
+                       return;
+               }
+               die(str, regs, err);
+       }
+}
+
+DO_ERROR(13, SIGILL,  "illegal slot instruction", illegal_slot_inst, current)
+DO_ERROR(87, SIGSEGV, "address error (exec)", address_error_exec, current)
+
+/* Implement misaligned load/store handling for kernel (and optionally for user
+   mode too).  Limitation : only SHmedia mode code is handled - there is no
+   handling at all for misaligned accesses occurring in SHcompact code yet. */
+
+static int misaligned_fixup(struct pt_regs *regs);
+
+asmlinkage void do_address_error_load(unsigned long error_code, struct pt_regs *regs)
+{
+       if (misaligned_fixup(regs) < 0) {
+               do_unhandled_exception(7, SIGSEGV, "address error(load)",
+                               "do_address_error_load",
+                               error_code, regs, current);
+       }
+       return;
+}
+
+asmlinkage void do_address_error_store(unsigned long error_code, struct pt_regs *regs)
+{
+       if (misaligned_fixup(regs) < 0) {
+               do_unhandled_exception(8, SIGSEGV, "address error(store)",
+                               "do_address_error_store",
+                               error_code, regs, current);
+       }
+       return;
+}
+
+#if defined(CONFIG_SH64_ID2815_WORKAROUND)
+
+#define OPCODE_INVALID      0
+#define OPCODE_USER_VALID   1
+#define OPCODE_PRIV_VALID   2
+
+/* getcon/putcon - requires checking which control register is referenced. */
+#define OPCODE_CTRL_REG     3
+
+/* Table of valid opcodes for SHmedia mode.
+   Form a 10-bit value by concatenating the major/minor opcodes i.e.
+   opcode[31:26,19:16].  The 6 MSBs of this value index into the following
+   array.  The 4 LSBs select the bit-pair in the entry (bits 1:0 correspond to
+   LSBs==4'b0000 etc). */
+static unsigned long shmedia_opcode_table[64] = {
+       0x55554044,0x54445055,0x15141514,0x14541414,0x00000000,0x10001000,0x01110055,0x04050015,
+       0x00000444,0xc0000000,0x44545515,0x40405555,0x55550015,0x10005555,0x55555505,0x04050000,
+       0x00000555,0x00000404,0x00040445,0x15151414,0x00000000,0x00000000,0x00000000,0x00000000,
+       0x00000055,0x40404444,0x00000404,0xc0009495,0x00000000,0x00000000,0x00000000,0x00000000,
+       0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
+       0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
+       0x80005050,0x04005055,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
+       0x81055554,0x00000404,0x55555555,0x55555555,0x00000000,0x00000000,0x00000000,0x00000000
+};
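+
+/* Worked example of the lookup (illustrative): an opcode with major field
+   0x30 and minor field 0x2 gives index = 0x30, selecting the table entry
+   0x80005050, and shift = 2*2 = 4, so the opcode state is
+   (0x80005050 >> 4) & 0x3 == 1, i.e. OPCODE_USER_VALID. */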
+
+void do_reserved_inst(unsigned long error_code, struct pt_regs *regs)
+{
+       /* Workaround SH5-101 cut2 silicon defect #2815 :
+          in some situations, inter-mode branches from SHcompact -> SHmedia
+          which should take ITLBMISS or EXECPROT exceptions at the target
+          falsely take RESINST at the target instead. */
+
+       unsigned long opcode = 0x6ff4fff0; /* guaranteed reserved opcode */
+       unsigned long pc, aligned_pc;
+       int get_user_error;
+       int trapnr = 12;
+       int signr = SIGILL;
+       char *exception_name = "reserved_instruction";
+
+       pc = regs->pc;
+       if ((pc & 3) == 1) {
+               /* SHmedia : check for defect.  This requires executable vmas
+                  to be readable too. */
+               aligned_pc = pc & ~3;
+               if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
+                       get_user_error = -EFAULT;
+               } else {
+                       get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
+               }
+               if (get_user_error >= 0) {
+                       unsigned long index, shift;
+                       unsigned long major, minor, combined;
+                       unsigned long reserved_field;
+                       reserved_field = opcode & 0xf; /* These bits are currently reserved as zero in all valid opcodes */
+                       major = (opcode >> 26) & 0x3f;
+                       minor = (opcode >> 16) & 0xf;
+                       combined = (major << 4) | minor;
+                       index = major;
+                       shift = minor << 1;
+                       if (reserved_field == 0) {
+                               int opcode_state = (shmedia_opcode_table[index] >> shift) & 0x3;
+                               switch (opcode_state) {
+                                       case OPCODE_INVALID:
+                                               /* Trap. */
+                                               break;
+                                       case OPCODE_USER_VALID:
+                                               /* Restart the instruction : the branch to the instruction will now be from an RTE
+                                                  not from SHcompact so the silicon defect won't be triggered. */
+                                               return;
+                                       case OPCODE_PRIV_VALID:
+                                               if (!user_mode(regs)) {
+                                                       /* Should only ever get here if a module has
+                                                          SHcompact code inside it.  If so, the same fix up is needed. */
+                                                       return; /* same reason */
+                                               }
+                                               /* Otherwise, user mode trying to execute a privileged instruction -
+                                                  fall through to trap. */
+                                               break;
+                                       case OPCODE_CTRL_REG:
+                                               /* If in privileged mode, return as above. */
+                                               if (!user_mode(regs)) return;
+                                               /* In user mode ... */
+                                               if (combined == 0x9f) { /* GETCON */
+                                                       unsigned long regno = (opcode >> 20) & 0x3f;
+                                                       if (regno >= 62) {
+                                                               return;
+                                                       }
+                                                       /* Otherwise, reserved or privileged control register, => trap */
+                                               } else if (combined == 0x1bf) { /* PUTCON */
+                                                       unsigned long regno = (opcode >> 4) & 0x3f;
+                                                       if (regno >= 62) {
+                                                               return;
+                                                       }
+                                                       /* Otherwise, reserved or privileged control register, => trap */
+                                               } else {
+                                                       /* Trap */
+                                               }
+                                               break;
+                                       default:
+                                               /* Fall through to trap. */
+                                               break;
+                               }
+                       }
+                       /* fall through to normal resinst processing */
+               } else {
+                       /* Error trying to read opcode.  This typically means a
+                          real fault, not a RESINST any more.  So change the
+                          codes. */
+                       trapnr = 87;
+                       exception_name = "address error (exec)";
+                       signr = SIGSEGV;
+               }
+       }
+
+       do_unhandled_exception(trapnr, signr, exception_name, "do_reserved_inst", error_code, regs, current);
+}
+
+#else /* CONFIG_SH64_ID2815_WORKAROUND */
+
+/* If the workaround isn't needed, this is just a straightforward reserved
+   instruction */
+DO_ERROR(12, SIGILL,  "reserved instruction", reserved_inst, current)
+
+#endif /* CONFIG_SH64_ID2815_WORKAROUND */
+
+/* Called with interrupts disabled */
+asmlinkage void do_exception_error(unsigned long ex, struct pt_regs *regs)
+{
+       show_excp_regs(__FUNCTION__, -1, -1, regs);
+       die_if_kernel("exception", regs, ex);
+}
+
+int do_unknown_trapa(unsigned long scId, struct pt_regs *regs)
+{
+       /* Syscall debug */
+        printk("System call ID error: [0x1#args:8 #syscall:16  0x%lx]\n", scId);
+
+       die_if_kernel("unknown trapa", regs, scId);
+
+       return -ENOSYS;
+}
+
+void show_stack(struct task_struct *tsk, unsigned long *sp)
+{
+#ifdef CONFIG_KALLSYMS
+       extern void sh64_unwind(struct pt_regs *regs);
+       struct pt_regs *regs;
+
+       regs = tsk ? tsk->thread.kregs : NULL;
+
+       sh64_unwind(regs);
+#else
+       printk(KERN_ERR "Can't backtrace on sh64 without CONFIG_KALLSYMS\n");
+#endif
+}
+
+void show_task(unsigned long *sp)
+{
+       show_stack(NULL, sp);
+}
+
+void dump_stack(void)
+{
+       show_task(NULL);
+}
+/* Needed by any user of WARN_ON in view of the defn in include/asm-sh/bug.h */
+EXPORT_SYMBOL(dump_stack);
+
+static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name,
+               unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk)
+{
+       show_excp_regs(fn_name, trapnr, signr, regs);
+       tsk->thread.error_code = error_code;
+       tsk->thread.trap_no = trapnr;
+
+       if (user_mode(regs))
+               force_sig(signr, tsk);
+
+       die_if_no_fixup(str, regs, error_code);
+}
+
+static int read_opcode(unsigned long long pc, unsigned long *result_opcode, int from_user_mode)
+{
+       int get_user_error;
+       unsigned long aligned_pc;
+       unsigned long opcode;
+
+       if ((pc & 3) == 1) {
+               /* SHmedia */
+               aligned_pc = pc & ~3;
+               if (from_user_mode) {
+                       if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
+                               get_user_error = -EFAULT;
+                       } else {
+                               get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
+                               *result_opcode = opcode;
+                       }
+                       return get_user_error;
+               } else {
+                       /* If the fault was in the kernel, read the opcode
+                        * directly; if the address is bad, the access will
+                        * simply fault in turn.
+                        */
+                       *result_opcode = *(unsigned long *) aligned_pc;
+                       return 0;
+               }
+       } else if ((pc & 1) == 0) {
+               /* SHcompact */
+               /* TODO : provide handling for this.  We don't really support
+                  user-mode SHcompact yet, and for a kernel fault, this would
+                  have to come from a module built for SHcompact.  */
+               return -EFAULT;
+       } else {
+               /* misaligned */
+               return -EFAULT;
+       }
+}
+
+static int address_is_sign_extended(__u64 a)
+{
+       __u64 b;
+#if (NEFF == 32)
+       b = (__u64)(__s64)(__s32)(a & 0xffffffffUL);
+       return (b == a) ? 1 : 0;
+#else
+#error "Sign extend check only works for NEFF==32"
+#endif
+}
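+
+/* Illustrative example with NEFF == 32: 0xffffffff80000000ULL is properly
+   sign-extended (bits 63:32 replicate bit 31) and passes the check above,
+   while 0x0000000180000000ULL is not and is rejected. */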
+
+static int generate_and_check_address(struct pt_regs *regs,
+                                     __u32 opcode,
+                                     int displacement_not_indexed,
+                                     int width_shift,
+                                     __u64 *address)
+{
+       /* return -1 for fault, 0 for OK */
+
+       __u64 base_address, addr;
+       int basereg;
+
+       basereg = (opcode >> 20) & 0x3f;
+       base_address = regs->regs[basereg];
+       if (displacement_not_indexed) {
+               __s64 displacement;
+               displacement = (opcode >> 10) & 0x3ff;
+               displacement = ((displacement << 54) >> 54); /* sign extend */
+               addr = (__u64)((__s64)base_address + (displacement << width_shift));
+       } else {
+               __u64 offset;
+               int offsetreg;
+               offsetreg = (opcode >> 10) & 0x3f;
+               offset = regs->regs[offsetreg];
+               addr = base_address + offset;
+       }
+
+       /* Check sign extended */
+       if (!address_is_sign_extended(addr)) {
+               return -1;
+       }
+
+#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
+       /* Check accessible.  For misaligned access in the kernel, assume the
+          address is always accessible (and if not, just fault when the
+          load/store gets done.) */
+       if (user_mode(regs)) {
+               if (addr >= TASK_SIZE) {
+                       return -1;
+               }
+               /* Do access_ok check later - it depends on whether it's a load or a store. */
+       }
+#endif
+
+       *address = addr;
+       return 0;
+}
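+
+/* Worked example (illustrative): a displacement field of 0x3ff sign-extends
+   to -1 above, so with width_shift == 2 (a 32-bit access) the effective
+   address comes out as base_address - 4. */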
+
+/* Default value as for sh */
+#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
+static int user_mode_unaligned_fixup_count = 10;
+static int user_mode_unaligned_fixup_enable = 1;
+#endif
+
+static int kernel_mode_unaligned_fixup_count = 32;
+
+static void misaligned_kernel_word_load(__u64 address, int do_sign_extend, __u64 *result)
+{
+       unsigned short x;
+       unsigned char *p, *q;
+       p = (unsigned char *) (int) address;
+       q = (unsigned char *) &x;
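+       /* Copy a byte at a time so the CPU never issues a misaligned access. */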
+       q[0] = p[0];
+       q[1] = p[1];
+
+       if (do_sign_extend) {
+               *result = (__u64)(__s64) *(short *) &x;
+       } else {
+               *result = (__u64) x;
+       }
+}
+
+static void misaligned_kernel_word_store(__u64 address, __u64 value)
+{
+       unsigned short x;
+       unsigned char *p, *q;
+       p = (unsigned char *) (int) address;
+       q = (unsigned char *) &x;
+
+       x = (__u16) value;
+       p[0] = q[0];
+       p[1] = q[1];
+}
+
+static int misaligned_load(struct pt_regs *regs,
+                          __u32 opcode,
+                          int displacement_not_indexed,
+                          int width_shift,
+                          int do_sign_extend)
+{
+       /* Return -1 for a fault, 0 for OK */
+       int error;
+       int destreg;
+       __u64 address;
+
+       error = generate_and_check_address(regs, opcode,
+                       displacement_not_indexed, width_shift, &address);
+       if (error < 0) {
+               return error;
+       }
+
+       destreg = (opcode >> 4) & 0x3f;
+#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
+       if (user_mode(regs)) {
+               __u64 buffer;
+
+               if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) {
+                       return -1;
+               }
+
+               if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
+                       return -1; /* fault */
+               }
+               switch (width_shift) {
+               case 1:
+                       if (do_sign_extend) {
+                               regs->regs[destreg] = (__u64)(__s64) *(__s16 *) &buffer;
+                       } else {
+                               regs->regs[destreg] = (__u64) *(__u16 *) &buffer;
+                       }
+                       break;
+               case 2:
+                       regs->regs[destreg] = (__u64)(__s64) *(__s32 *) &buffer;
+                       break;
+               case 3:
+                       regs->regs[destreg] = buffer;
+                       break;
+               default:
+                       printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
+                               width_shift, (unsigned long) regs->pc);
+                       break;
+               }
+       } else
+#endif
+       {
+               /* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
+               __u64 lo, hi;
+
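+               /*
+                * The ldlo/ldhi pairs below each load the portion of the
+                * misaligned datum that falls within one aligned long/quad
+                * word; OR-ing the two partial results reassembles the
+                * full value without issuing a misaligned access.
+                */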
+               switch (width_shift) {
+               case 1:
+                       misaligned_kernel_word_load(address, do_sign_extend, &regs->regs[destreg]);
+                       break;
+               case 2:
+                       asm ("ldlo.l %1, 0, %0" : "=r" (lo) : "r" (address));
+                       asm ("ldhi.l %1, 3, %0" : "=r" (hi) : "r" (address));
+                       regs->regs[destreg] = lo | hi;
+                       break;
+               case 3:
+                       asm ("ldlo.q %1, 0, %0" : "=r" (lo) : "r" (address));
+                       asm ("ldhi.q %1, 7, %0" : "=r" (hi) : "r" (address));
+                       regs->regs[destreg] = lo | hi;
+                       break;
+
+               default:
+                       printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
+                               width_shift, (unsigned long) regs->pc);
+                       break;
+               }
+       }
+
+       return 0;
+}
+
+static int misaligned_store(struct pt_regs *regs,
+                           __u32 opcode,
+                           int displacement_not_indexed,
+                           int width_shift)
+{
+       /* Return -1 for a fault, 0 for OK */
+       int error;
+       int srcreg;
+       __u64 address;
+
+       error = generate_and_check_address(regs, opcode,
+                       displacement_not_indexed, width_shift, &address);
+       if (error < 0) {
+               return error;
+       }
+
+       srcreg = (opcode >> 4) & 0x3f;
+#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
+       if (user_mode(regs)) {
+               __u64 buffer;
+
+               if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) {
+                       return -1;
+               }
+
+               switch (width_shift) {
+               case 1:
+                       *(__u16 *) &buffer = (__u16) regs->regs[srcreg];
+                       break;
+               case 2:
+                       *(__u32 *) &buffer = (__u32) regs->regs[srcreg];
+                       break;
+               case 3:
+                       buffer = regs->regs[srcreg];
+                       break;
+               default:
+                       printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
+                               width_shift, (unsigned long) regs->pc);
+                       break;
+               }
+
+               if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
+                       return -1; /* fault */
+               }
+       } else
+#endif
+       {
+               /* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
+               __u64 val = regs->regs[srcreg];
+
+               switch (width_shift) {
+               case 1:
+                       misaligned_kernel_word_store(address, val);
+                       break;
+               case 2:
+                       asm ("stlo.l %1, 0, %0" : : "r" (val), "r" (address));
+                       asm ("sthi.l %1, 3, %0" : : "r" (val), "r" (address));
+                       break;
+               case 3:
+                       asm ("stlo.q %1, 0, %0" : : "r" (val), "r" (address));
+                       asm ("sthi.q %1, 7, %0" : : "r" (val), "r" (address));
+                       break;
+
+               default:
+                       printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
+                               width_shift, (unsigned long) regs->pc);
+                       break;
+               }
+       }
+
+       return 0;
+}
+
+#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
+/* Never need to fix up misaligned FPU accesses within the kernel since that's a real
+   error. */
+static int misaligned_fpu_load(struct pt_regs *regs,
+                          __u32 opcode,
+                          int displacement_not_indexed,
+                          int width_shift,
+                          int do_paired_load)
+{
+       /* Return -1 for a fault, 0 for OK */
+       int error;
+       int destreg;
+       __u64 address;
+
+       error = generate_and_check_address(regs, opcode,
+                       displacement_not_indexed, width_shift, &address);
+       if (error < 0) {
+               return error;
+       }
+
+       destreg = (opcode >> 4) & 0x3f;
+       if (user_mode(regs)) {
+               __u64 buffer;
+               __u32 buflo, bufhi;
+
+               if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) {
+                       return -1;
+               }
+
+               if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
+                       return -1; /* fault */
+               }
+               /* 'current' may be the current owner of the FPU state, so
+                  context switch the registers into memory so they can be
+                  indexed by register number. */
+               if (last_task_used_math == current) {
+                       grab_fpu();
+                       fpsave(&current->thread.fpu.hard);
+                       release_fpu();
+                       last_task_used_math = NULL;
+                       regs->sr |= SR_FD;
+               }
+
+               buflo = *(__u32*) &buffer;
+               bufhi = *(1 + (__u32*) &buffer);
+
+               switch (width_shift) {
+               case 2:
+                       current->thread.fpu.hard.fp_regs[destreg] = buflo;
+                       break;
+               case 3:
+                       if (do_paired_load) {
+                               current->thread.fpu.hard.fp_regs[destreg] = buflo;
+                               current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
+                       } else {
+#if defined(CONFIG_LITTLE_ENDIAN)
+                               current->thread.fpu.hard.fp_regs[destreg] = bufhi;
+                               current->thread.fpu.hard.fp_regs[destreg+1] = buflo;
+#else
+                               current->thread.fpu.hard.fp_regs[destreg] = buflo;
+                               current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
+#endif
+                       }
+                       break;
+               default:
+                       printk("Unexpected width_shift %d in misaligned_fpu_load, PC=%08lx\n",
+                               width_shift, (unsigned long) regs->pc);
+                       break;
+               }
+               return 0;
+       } else {
+               die ("Misaligned FPU load inside kernel", regs, 0);
+               return -1;
+       }
+}
+
+static int misaligned_fpu_store(struct pt_regs *regs,
+                          __u32 opcode,
+                          int displacement_not_indexed,
+                          int width_shift,
+                          int do_paired_load)
+{
+       /* Return -1 for a fault, 0 for OK */
+       int error;
+       int srcreg;
+       __u64 address;
+
+       error = generate_and_check_address(regs, opcode,
+                       displacement_not_indexed, width_shift, &address);
+       if (error < 0) {
+               return error;
+       }
+
+       srcreg = (opcode >> 4) & 0x3f;
+       if (user_mode(regs)) {
+               __u64 buffer;
+               /* Initialise these to NaNs. */
+               __u32 buflo=0xffffffffUL, bufhi=0xffffffffUL;
+
+               if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) {
+                       return -1;
+               }
+
+               /* 'current' may be the current owner of the FPU state, so
+                  context switch the registers into memory so they can be
+                  indexed by register number. */
+               if (last_task_used_math == current) {
+                       grab_fpu();
+                       fpsave(&current->thread.fpu.hard);
+                       release_fpu();
+                       last_task_used_math = NULL;
+                       regs->sr |= SR_FD;
+               }
+
+               switch (width_shift) {
+               case 2:
+                       buflo = current->thread.fpu.hard.fp_regs[srcreg];
+                       break;
+               case 3:
+                       if (do_paired_load) {
+                               buflo = current->thread.fpu.hard.fp_regs[srcreg];
+                               bufhi = current->thread.fpu.hard.fp_regs[srcreg+1];
+                       } else {
+#if defined(CONFIG_LITTLE_ENDIAN)
+                               bufhi = current->thread.fpu.hard.fp_regs[srcreg];
+                               buflo = current->thread.fpu.hard.fp_regs[srcreg+1];
+#else
+                               buflo = current->thread.fpu.hard.fp_regs[srcreg];
+                               bufhi = current->thread.fpu.hard.fp_regs[srcreg+1];
+#endif
+                       }
+                       break;
+               default:
+                       printk("Unexpected width_shift %d in misaligned_fpu_store, PC=%08lx\n",
+                               width_shift, (unsigned long) regs->pc);
+                       break;
+               }
+
+               *(__u32*) &buffer = buflo;
+               *(1 + (__u32*) &buffer) = bufhi;
+               if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
+                       return -1; /* fault */
+               }
+               return 0;
+       } else {
+               die ("Misaligned FPU store inside kernel", regs, 0);
+               return -1;
+       }
+}
+#endif
+
+static int misaligned_fixup(struct pt_regs *regs)
+{
+       unsigned long opcode;
+       int error;
+       int major, minor;
+
+#if !defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
+       /* Never fixup user mode misaligned accesses without this option enabled. */
+       return -1;
+#else
+       if (!user_mode_unaligned_fixup_enable) return -1;
+#endif
+
+       error = read_opcode(regs->pc, &opcode, user_mode(regs));
+       if (error < 0) {
+               return error;
+       }
+       major = (opcode >> 26) & 0x3f;
+       minor = (opcode >> 16) & 0xf;
+
+#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
+       if (user_mode(regs) && (user_mode_unaligned_fixup_count > 0)) {
+               --user_mode_unaligned_fixup_count;
+               /* Only do 'count' worth of these reports, to remove a potential DoS against syslog */
+               printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
+                      current->comm, task_pid_nr(current), (__u32)regs->pc, opcode);
+       } else
+#endif
+       if (!user_mode(regs) && (kernel_mode_unaligned_fixup_count > 0)) {
+               --kernel_mode_unaligned_fixup_count;
+               if (in_interrupt()) {
+                       printk("Fixing up unaligned kernelspace access in interrupt pc=0x%08x ins=0x%08lx\n",
+                              (__u32)regs->pc, opcode);
+               } else {
+                       printk("Fixing up unaligned kernelspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
+                              current->comm, task_pid_nr(current), (__u32)regs->pc, opcode);
+               }
+       }
+
+       switch (major) {
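+               /*
+                * The case labels are the conventional 8-bit opcode values,
+                * shifted down by 2 to match the 6-bit major field decoded
+                * above.
+                */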
+               case (0x84>>2): /* LD.W */
+                       error = misaligned_load(regs, opcode, 1, 1, 1);
+                       break;
+               case (0xb0>>2): /* LD.UW */
+                       error = misaligned_load(regs, opcode, 1, 1, 0);
+                       break;
+               case (0x88>>2): /* LD.L */
+                       error = misaligned_load(regs, opcode, 1, 2, 1);
+                       break;
+               case (0x8c>>2): /* LD.Q */
+                       error = misaligned_load(regs, opcode, 1, 3, 0);
+                       break;
+
+               case (0xa4>>2): /* ST.W */
+                       error = misaligned_store(regs, opcode, 1, 1);
+                       break;
+               case (0xa8>>2): /* ST.L */
+                       error = misaligned_store(regs, opcode, 1, 2);
+                       break;
+               case (0xac>>2): /* ST.Q */
+                       error = misaligned_store(regs, opcode, 1, 3);
+                       break;
+
+               case (0x40>>2): /* indexed loads */
+                       switch (minor) {
+                               case 0x1: /* LDX.W */
+                                       error = misaligned_load(regs, opcode, 0, 1, 1);
+                                       break;
+                               case 0x5: /* LDX.UW */
+                                       error = misaligned_load(regs, opcode, 0, 1, 0);
+                                       break;
+                               case 0x2: /* LDX.L */
+                                       error = misaligned_load(regs, opcode, 0, 2, 1);
+                                       break;
+                               case 0x3: /* LDX.Q */
+                                       error = misaligned_load(regs, opcode, 0, 3, 0);
+                                       break;
+                               default:
+                                       error = -1;
+                                       break;
+                       }
+                       break;
+
+               case (0x60>>2): /* indexed stores */
+                       switch (minor) {
+                               case 0x1: /* STX.W */
+                                       error = misaligned_store(regs, opcode, 0, 1);
+                                       break;
+                               case 0x2: /* STX.L */
+                                       error = misaligned_store(regs, opcode, 0, 2);
+                                       break;
+                               case 0x3: /* STX.Q */
+                                       error = misaligned_store(regs, opcode, 0, 3);
+                                       break;
+                               default:
+                                       error = -1;
+                                       break;
+                       }
+                       break;
+
+#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
+               case (0x94>>2): /* FLD.S */
+                       error = misaligned_fpu_load(regs, opcode, 1, 2, 0);
+                       break;
+               case (0x98>>2): /* FLD.P */
+                       error = misaligned_fpu_load(regs, opcode, 1, 3, 1);
+                       break;
+               case (0x9c>>2): /* FLD.D */
+                       error = misaligned_fpu_load(regs, opcode, 1, 3, 0);
+                       break;
+               case (0x1c>>2): /* floating indexed loads */
+                       switch (minor) {
+                       case 0x8: /* FLDX.S */
+                               error = misaligned_fpu_load(regs, opcode, 0, 2, 0);
+                               break;
+                       case 0xd: /* FLDX.P */
+                               error = misaligned_fpu_load(regs, opcode, 0, 3, 1);
+                               break;
+                       case 0x9: /* FLDX.D */
+                               error = misaligned_fpu_load(regs, opcode, 0, 3, 0);
+                               break;
+                       default:
+                               error = -1;
+                               break;
+                       }
+                       break;
+               case (0xb4>>2): /* FST.S */
+                       error = misaligned_fpu_store(regs, opcode, 1, 2, 0);
+                       break;
+               case (0xb8>>2): /* FST.P */
+                       error = misaligned_fpu_store(regs, opcode, 1, 3, 1);
+                       break;
+               case (0xbc>>2): /* FST.D */
+                       error = misaligned_fpu_store(regs, opcode, 1, 3, 0);
+                       break;
+               case (0x3c>>2): /* floating indexed stores */
+                       switch (minor) {
+                       case 0x8: /* FSTX.S */
+                               error = misaligned_fpu_store(regs, opcode, 0, 2, 0);
+                               break;
+                       case 0xd: /* FSTX.P */
+                               error = misaligned_fpu_store(regs, opcode, 0, 3, 1);
+                               break;
+                       case 0x9: /* FSTX.D */
+                               error = misaligned_fpu_store(regs, opcode, 0, 3, 0);
+                               break;
+                       default:
+                               error = -1;
+                               break;
+                       }
+                       break;
+#endif
+
+               default:
+                       /* Fault */
+                       error = -1;
+                       break;
+       }
+
+       if (error < 0) {
+               return error;
+       } else {
+               regs->pc += 4; /* Skip the instruction that's just been emulated */
+               return 0;
+       }
+}
+
+static ctl_table unaligned_table[] = {
+       {
+               .ctl_name       = CTL_UNNUMBERED,
+               .procname       = "kernel_reports",
+               .data           = &kernel_mode_unaligned_fixup_count,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = &proc_dointvec
+       },
+#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
+       {
+               .ctl_name       = CTL_UNNUMBERED,
+               .procname       = "user_reports",
+               .data           = &user_mode_unaligned_fixup_count,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = &proc_dointvec
+       },
+       {
+               .ctl_name       = CTL_UNNUMBERED,
+               .procname       = "user_enable",
+               .data           = &user_mode_unaligned_fixup_enable,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = &proc_dointvec
+       },
+#endif
+       {}
+};
+
+static ctl_table unaligned_root[] = {
+       {
+               .ctl_name       = CTL_UNNUMBERED,
+               .procname       = "unaligned_fixup",
+               .mode           = 0555,
+               .child          = unaligned_table
+       },
+       {}
+};
+
+static ctl_table sh64_root[] = {
+       {
+               .ctl_name       = CTL_UNNUMBERED,
+               .procname       = "sh64",
+               .mode           = 0555,
+               .child          = unaligned_root
+       },
+       {}
+};
+static struct ctl_table_header *sysctl_header;
+static int __init init_sysctl(void)
+{
+       sysctl_header = register_sysctl_table(sh64_root);
+       return 0;
+}
+
+__initcall(init_sysctl);
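+
+/*
+ * Illustrative usage, assuming the usual procfs sysctl mount: the knobs
+ * registered above appear as files such as
+ *
+ *     /proc/sys/sh64/unaligned_fixup/kernel_reports
+ *
+ * and can be tuned at run time, e.g.:
+ *
+ *     echo 0 > /proc/sys/sh64/unaligned_fixup/user_enable
+ */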
+
+asmlinkage void do_debug_interrupt(unsigned long code, struct pt_regs *regs)
+{
+       u64 peek_real_address_q(u64 addr);
+       u64 poke_real_address_q(u64 addr, u64 val);
+       unsigned long long DM_EXP_CAUSE_PHY = 0x0c100010;
+       unsigned long long exp_cause;
+       /* It's not worth ioremapping the debug module registers for the amount
+          of access we make to them - just go direct to their physical
+          addresses. */
+       exp_cause = peek_real_address_q(DM_EXP_CAUSE_PHY);
+       if (exp_cause & ~4) {
+               printk("DM.EXP_CAUSE had unexpected bits set (=%08lx)\n",
+                       (unsigned long)(exp_cause & 0xffffffff));
+       }
+       show_state();
+       /* Clear all DEBUGINT causes */
+       poke_real_address_q(DM_EXP_CAUSE_PHY, 0x0);
+}
diff --git a/arch/sh64/kernel/traps.c b/arch/sh64/kernel/traps.c
deleted file mode 100644 (file)
index f32df38..0000000
+++ /dev/null
@@ -1,982 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * arch/sh64/kernel/traps.c
- *
- * Copyright (C) 2000, 2001  Paolo Alberelli
- * Copyright (C) 2003, 2004  Paul Mundt
- * Copyright (C) 2003, 2004  Richard Curnow
- *
- */
-
-/*
- * 'Traps.c' handles hardware traps and faults after we have saved some
- * state in 'entry.S'.
- */
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/ptrace.h>
-#include <linux/timer.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/spinlock.h>
-#include <linux/kallsyms.h>
-#include <linux/interrupt.h>
-#include <linux/sysctl.h>
-#include <linux/module.h>
-#include <asm/system.h>
-#include <asm/uaccess.h>
-#include <asm/io.h>
-#include <asm/atomic.h>
-#include <asm/processor.h>
-#include <asm/pgtable.h>
-
-#undef DEBUG_EXCEPTION
-#ifdef DEBUG_EXCEPTION
-/* implemented in ../lib/dbg.c */
-extern void show_excp_regs(char *fname, int trapnr, int signr,
-                          struct pt_regs *regs);
-#else
-#define show_excp_regs(a, b, c, d)
-#endif
-
-static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name,
-               unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk);
-
-#define DO_ERROR(trapnr, signr, str, name, tsk) \
-asmlinkage void do_##name(unsigned long error_code, struct pt_regs *regs) \
-{ \
-       do_unhandled_exception(trapnr, signr, str, __stringify(name), error_code, regs, current); \
-}
-
-spinlock_t die_lock;
-
-void die(const char * str, struct pt_regs * regs, long err)
-{
-       console_verbose();
-       spin_lock_irq(&die_lock);
-       printk("%s: %lx\n", str, (err & 0xffffff));
-       show_regs(regs);
-       spin_unlock_irq(&die_lock);
-       do_exit(SIGSEGV);
-}
-
-static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
-{
-       if (!user_mode(regs))
-               die(str, regs, err);
-}
-
-static void die_if_no_fixup(const char * str, struct pt_regs * regs, long err)
-{
-       if (!user_mode(regs)) {
-               const struct exception_table_entry *fixup;
-               fixup = search_exception_tables(regs->pc);
-               if (fixup) {
-                       regs->pc = fixup->fixup;
-                       return;
-               }
-               die(str, regs, err);
-       }
-}
-
-DO_ERROR(13, SIGILL,  "illegal slot instruction", illegal_slot_inst, current)
-DO_ERROR(87, SIGSEGV, "address error (exec)", address_error_exec, current)
-
-
-/* Implement misaligned load/store handling for kernel (and optionally for user
-   mode too).  Limitation : only SHmedia mode code is handled - there is no
-   handling at all for misaligned accesses occurring in SHcompact code yet. */
-
-static int misaligned_fixup(struct pt_regs *regs);
-
-asmlinkage void do_address_error_load(unsigned long error_code, struct pt_regs *regs)
-{
-       if (misaligned_fixup(regs) < 0) {
-               do_unhandled_exception(7, SIGSEGV, "address error(load)",
-                               "do_address_error_load",
-                               error_code, regs, current);
-       }
-       return;
-}
-
-asmlinkage void do_address_error_store(unsigned long error_code, struct pt_regs *regs)
-{
-       if (misaligned_fixup(regs) < 0) {
-               do_unhandled_exception(8, SIGSEGV, "address error(store)",
-                               "do_address_error_store",
-                               error_code, regs, current);
-       }
-       return;
-}
-
-#if defined(CONFIG_SH64_ID2815_WORKAROUND)
-
-#define OPCODE_INVALID      0
-#define OPCODE_USER_VALID   1
-#define OPCODE_PRIV_VALID   2
-
-/* getcon/putcon - requires checking which control register is referenced. */
-#define OPCODE_CTRL_REG     3
-
-/* Table of valid opcodes for SHmedia mode.
-   Form a 10-bit value by concatenating the major/minor opcodes i.e.
-   opcode[31:26,20:16].  The 6 MSBs of this value index into the following
-   array.  The 4 LSBs select the bit-pair in the entry (bits 1:0 correspond to
-   LSBs==4'b0000 etc). */
-static unsigned long shmedia_opcode_table[64] = {
-       0x55554044,0x54445055,0x15141514,0x14541414,0x00000000,0x10001000,0x01110055,0x04050015,
-       0x00000444,0xc0000000,0x44545515,0x40405555,0x55550015,0x10005555,0x55555505,0x04050000,
-       0x00000555,0x00000404,0x00040445,0x15151414,0x00000000,0x00000000,0x00000000,0x00000000,
-       0x00000055,0x40404444,0x00000404,0xc0009495,0x00000000,0x00000000,0x00000000,0x00000000,
-       0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
-       0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
-       0x80005050,0x04005055,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
-       0x81055554,0x00000404,0x55555555,0x55555555,0x00000000,0x00000000,0x00000000,0x00000000
-};
-
-void do_reserved_inst(unsigned long error_code, struct pt_regs *regs)
-{
-       /* Workaround SH5-101 cut2 silicon defect #2815 :
-          in some situations, inter-mode branches from SHcompact -> SHmedia
-          which should take ITLBMISS or EXECPROT exceptions at the target
-          falsely take RESINST at the target instead. */
-
-       unsigned long opcode = 0x6ff4fff0; /* guaranteed reserved opcode */
-       unsigned long pc, aligned_pc;
-       int get_user_error;
-       int trapnr = 12;
-       int signr = SIGILL;
-       char *exception_name = "reserved_instruction";
-
-       pc = regs->pc;
-       if ((pc & 3) == 1) {
-               /* SHmedia : check for defect.  This requires executable vmas
-                  to be readable too. */
-               aligned_pc = pc & ~3;
-               if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
-                       get_user_error = -EFAULT;
-               } else {
-                       get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
-               }
-               if (get_user_error >= 0) {
-                       unsigned long index, shift;
-                       unsigned long major, minor, combined;
-                       unsigned long reserved_field;
-                       reserved_field = opcode & 0xf; /* These bits are currently reserved as zero in all valid opcodes */
-                       major = (opcode >> 26) & 0x3f;
-                       minor = (opcode >> 16) & 0xf;
-                       combined = (major << 4) | minor;
-                       index = major;
-                       shift = minor << 1;
-                       if (reserved_field == 0) {
-                               int opcode_state = (shmedia_opcode_table[index] >> shift) & 0x3;
-                               switch (opcode_state) {
-                                       case OPCODE_INVALID:
-                                               /* Trap. */
-                                               break;
-                                       case OPCODE_USER_VALID:
-                                               /* Restart the instruction : the branch to the instruction will now be from an RTE
-                                                  not from SHcompact so the silicon defect won't be triggered. */
-                                               return;
-                                       case OPCODE_PRIV_VALID:
-                                               if (!user_mode(regs)) {
-                                                       /* Should only ever get here if a module has
-                                                          SHcompact code inside it.  If so, the same fix up is needed. */
-                                                       return; /* same reason */
-                                               }
-                                               /* Otherwise, user mode trying to execute a privileged instruction -
-                                                  fall through to trap. */
-                                               break;
-                                       case OPCODE_CTRL_REG:
-                                               /* If in privileged mode, return as above. */
-                                               if (!user_mode(regs)) return;
-                                               /* In user mode ... */
-                                               if (combined == 0x9f) { /* GETCON */
-                                                       unsigned long regno = (opcode >> 20) & 0x3f;
-                                                       if (regno >= 62) {
-                                                               return;
-                                                       }
-                                                       /* Otherwise, reserved or privileged control register, => trap */
-                                               } else if (combined == 0x1bf) { /* PUTCON */
-                                                       unsigned long regno = (opcode >> 4) & 0x3f;
-                                                       if (regno >= 62) {
-                                                               return;
-                                                       }
-                                                       /* Otherwise, reserved or privileged control register, => trap */
-                                               } else {
-                                                       /* Trap */
-                                               }
-                                               break;
-                                       default:
-                                               /* Fall through to trap. */
-                                               break;
-                               }
-                       }
-                       /* fall through to normal resinst processing */
-               } else {
-                       /* Error trying to read opcode.  This typically means a
-                          real fault, not a RESINST any more.  So change the
-                          codes. */
-                       trapnr = 87;
-                       exception_name = "address error (exec)";
-                       signr = SIGSEGV;
-               }
-       }
-
-       do_unhandled_exception(trapnr, signr, exception_name, "do_reserved_inst", error_code, regs, current);
-}
-
-#else /* CONFIG_SH64_ID2815_WORKAROUND */
-
-/* If the workaround isn't needed, this is just a straightforward reserved
-   instruction */
-DO_ERROR(12, SIGILL,  "reserved instruction", reserved_inst, current)
-
-#endif /* CONFIG_SH64_ID2815_WORKAROUND */
-
-/* Called with interrupts disabled */
-asmlinkage void do_exception_error(unsigned long ex, struct pt_regs *regs)
-{
-       PLS();
-       show_excp_regs(__FUNCTION__, -1, -1, regs);
-       die_if_kernel("exception", regs, ex);
-}
-
-int do_unknown_trapa(unsigned long scId, struct pt_regs *regs)
-{
-       /* Syscall debug */
-        printk("System call ID error: [0x1#args:8 #syscall:16  0x%lx]\n", scId);
-
-       die_if_kernel("unknown trapa", regs, scId);
-
-       return -ENOSYS;
-}
-
-void show_stack(struct task_struct *tsk, unsigned long *sp)
-{
-#ifdef CONFIG_KALLSYMS
-       extern void sh64_unwind(struct pt_regs *regs);
-       struct pt_regs *regs;
-
-       regs = tsk ? tsk->thread.kregs : NULL;
-
-       sh64_unwind(regs);
-#else
-       printk(KERN_ERR "Can't backtrace on sh64 without CONFIG_KALLSYMS\n");
-#endif
-}
-
-void show_task(unsigned long *sp)
-{
-       show_stack(NULL, sp);
-}
-
-void dump_stack(void)
-{
-       show_task(NULL);
-}
-/* Needed by any user of WARN_ON in view of the defn in include/asm-sh/bug.h */
-EXPORT_SYMBOL(dump_stack);
-
-static void do_unhandled_exception(int trapnr, int signr, char *str, char *fn_name,
-               unsigned long error_code, struct pt_regs *regs, struct task_struct *tsk)
-{
-       show_excp_regs(fn_name, trapnr, signr, regs);
-       tsk->thread.error_code = error_code;
-       tsk->thread.trap_no = trapnr;
-
-       if (user_mode(regs))
-               force_sig(signr, tsk);
-
-       die_if_no_fixup(str, regs, error_code);
-}
-
-static int read_opcode(unsigned long long pc, unsigned long *result_opcode, int from_user_mode)
-{
-       int get_user_error;
-       unsigned long aligned_pc;
-       unsigned long opcode;
-
-       if ((pc & 3) == 1) {
-               /* SHmedia */
-               aligned_pc = pc & ~3;
-               if (from_user_mode) {
-                       if (!access_ok(VERIFY_READ, aligned_pc, sizeof(unsigned long))) {
-                               get_user_error = -EFAULT;
-                       } else {
-                               get_user_error = __get_user(opcode, (unsigned long *)aligned_pc);
-                               *result_opcode = opcode;
-                       }
-                       return get_user_error;
-               } else {
-                       /* If the fault was in the kernel, we can either read
-                        * this directly, or if not, we fault.
-                       */
-                       *result_opcode = *(unsigned long *) aligned_pc;
-                       return 0;
-               }
-       } else if ((pc & 1) == 0) {
-               /* SHcompact */
-               /* TODO : provide handling for this.  We don't really support
-                  user-mode SHcompact yet, and for a kernel fault, this would
-                  have to come from a module built for SHcompact.  */
-               return -EFAULT;
-       } else {
-               /* misaligned */
-               return -EFAULT;
-       }
-}
-
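-/* NEFF is the number of effective address bits.  With NEFF == 32, a valid
-   address must be equal to the sign extension of its low 32 bits; anything
-   else cannot have come from a legal pointer. */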
-static int address_is_sign_extended(__u64 a)
-{
-       __u64 b;
-#if (NEFF == 32)
-       b = (__u64)(__s64)(__s32)(a & 0xffffffffUL);
-       return (b == a) ? 1 : 0;
-#else
-#error "Sign extend check only works for NEFF==32"
-#endif
-}
-
-static int generate_and_check_address(struct pt_regs *regs,
-                                     __u32 opcode,
-                                     int displacement_not_indexed,
-                                     int width_shift,
-                                     __u64 *address)
-{
-       /* return -1 for fault, 0 for OK */
-
-       __u64 base_address, addr;
-       int basereg;
-
-       basereg = (opcode >> 20) & 0x3f;
-       base_address = regs->regs[basereg];
-       if (displacement_not_indexed) {
-               __s64 displacement;
-               displacement = (opcode >> 10) & 0x3ff;
-               displacement = ((displacement << 54) >> 54); /* sign extend */
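-               /* The 10-bit field lives in bits [19:10] of the opcode.
-                  Shifting it left by 54 puts its sign bit at bit 63, so
-                  the arithmetic right shift replicates it: e.g. a field
-                  value of 0x3ff comes back as -1. */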
-               addr = (__u64)((__s64)base_address + (displacement << width_shift));
-       } else {
-               __u64 offset;
-               int offsetreg;
-               offsetreg = (opcode >> 10) & 0x3f;
-               offset = regs->regs[offsetreg];
-               addr = base_address + offset;
-       }
-
-       /* Check sign extended */
-       if (!address_is_sign_extended(addr)) {
-               return -1;
-       }
-
-#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
-       /* Check accessible.  For misaligned access in the kernel, assume the
-          address is always accessible (and if not, just fault when the
-          load/store gets done.) */
-       if (user_mode(regs)) {
-               if (addr >= TASK_SIZE) {
-                       return -1;
-               }
-               /* Do access_ok check later - it depends on whether it's a load or a store. */
-       }
-#endif
-
-       *address = addr;
-       return 0;
-}
-
-/* Default value as for sh */
-#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
-static int user_mode_unaligned_fixup_count = 10;
-static int user_mode_unaligned_fixup_enable = 1;
-#endif
-
-static int kernel_mode_unaligned_fixup_count = 32;
-
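-/* 16-bit kernel-mode fixups are done with byte loads and stores: a byte
-   access can never be misaligned, so these helpers cannot re-trap. */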
-static void misaligned_kernel_word_load(__u64 address, int do_sign_extend, __u64 *result)
-{
-       unsigned short x;
-       unsigned char *p, *q;
-       p = (unsigned char *) (int) address;
-       q = (unsigned char *) &x;
-       q[0] = p[0];
-       q[1] = p[1];
-
-       if (do_sign_extend) {
-               *result = (__u64)(__s64) *(short *) &x;
-       } else {
-               *result = (__u64) x;
-       }
-}
-
-static void misaligned_kernel_word_store(__u64 address, __u64 value)
-{
-       unsigned short x;
-       unsigned char *p, *q;
-       p = (unsigned char *) (int) address;
-       q = (unsigned char *) &x;
-
-       x = (__u16) value;
-       p[0] = q[0];
-       p[1] = q[1];
-}
-
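-/* In the helpers below, width_shift is log2 of the access size in bytes:
-   1 = 16-bit, 2 = 32-bit, 3 = 64-bit. */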
-static int misaligned_load(struct pt_regs *regs,
-                          __u32 opcode,
-                          int displacement_not_indexed,
-                          int width_shift,
-                          int do_sign_extend)
-{
-       /* Return -1 for a fault, 0 for OK */
-       int error;
-       int destreg;
-       __u64 address;
-
-       error = generate_and_check_address(regs, opcode,
-                       displacement_not_indexed, width_shift, &address);
-       if (error < 0) {
-               return error;
-       }
-
-       destreg = (opcode >> 4) & 0x3f;
-#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
-       if (user_mode(regs)) {
-               __u64 buffer;
-
-               if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) {
-                       return -1;
-               }
-
-               if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
-                       return -1; /* fault */
-               }
-               switch (width_shift) {
-               case 1:
-                       if (do_sign_extend) {
-                               regs->regs[destreg] = (__u64)(__s64) *(__s16 *) &buffer;
-                       } else {
-                               regs->regs[destreg] = (__u64) *(__u16 *) &buffer;
-                       }
-                       break;
-               case 2:
-                       regs->regs[destreg] = (__u64)(__s64) *(__s32 *) &buffer;
-                       break;
-               case 3:
-                       regs->regs[destreg] = buffer;
-                       break;
-               default:
-                       printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
-                               width_shift, (unsigned long) regs->pc);
-                       break;
-               }
-       } else
-#endif
-       {
-               /* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
-               __u64 lo, hi;
-
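-               /* ldlo/ldhi each fetch only the bytes of the datum lying
-                  on their side of the alignment boundary, so neither
-                  access is itself misaligned; OR-ing the two partial
-                  results reassembles the full value. */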
-               switch (width_shift) {
-               case 1:
-                       misaligned_kernel_word_load(address, do_sign_extend, &regs->regs[destreg]);
-                       break;
-               case 2:
-                       asm ("ldlo.l %1, 0, %0" : "=r" (lo) : "r" (address));
-                       asm ("ldhi.l %1, 3, %0" : "=r" (hi) : "r" (address));
-                       regs->regs[destreg] = lo | hi;
-                       break;
-               case 3:
-                       asm ("ldlo.q %1, 0, %0" : "=r" (lo) : "r" (address));
-                       asm ("ldhi.q %1, 7, %0" : "=r" (hi) : "r" (address));
-                       regs->regs[destreg] = lo | hi;
-                       break;
-
-               default:
-                       printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
-                               width_shift, (unsigned long) regs->pc);
-                       break;
-               }
-       }
-
-       return 0;
-}
-
-static int misaligned_store(struct pt_regs *regs,
-                           __u32 opcode,
-                           int displacement_not_indexed,
-                           int width_shift)
-{
-       /* Return -1 for a fault, 0 for OK */
-       int error;
-       int srcreg;
-       __u64 address;
-
-       error = generate_and_check_address(regs, opcode,
-                       displacement_not_indexed, width_shift, &address);
-       if (error < 0) {
-               return error;
-       }
-
-       srcreg = (opcode >> 4) & 0x3f;
-#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
-       if (user_mode(regs)) {
-               __u64 buffer;
-
-               if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) {
-                       return -1;
-               }
-
-               switch (width_shift) {
-               case 1:
-                       *(__u16 *) &buffer = (__u16) regs->regs[srcreg];
-                       break;
-               case 2:
-                       *(__u32 *) &buffer = (__u32) regs->regs[srcreg];
-                       break;
-               case 3:
-                       buffer = regs->regs[srcreg];
-                       break;
-               default:
-                       printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
-                               width_shift, (unsigned long) regs->pc);
-                       break;
-               }
-
-               if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
-                       return -1; /* fault */
-               }
-       } else
-#endif
-       {
-               /* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
-               __u64 val = regs->regs[srcreg];
-
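-               /* stlo/sthi are the store-side counterparts of ldlo/ldhi:
-                  each writes just the bytes that fall on its side of the
-                  alignment boundary. */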
-               switch (width_shift) {
-               case 1:
-                       misaligned_kernel_word_store(address, val);
-                       break;
-               case 2:
-                       asm ("stlo.l %1, 0, %0" : : "r" (val), "r" (address));
-                       asm ("sthi.l %1, 3, %0" : : "r" (val), "r" (address));
-                       break;
-               case 3:
-                       asm ("stlo.q %1, 0, %0" : : "r" (val), "r" (address));
-                       asm ("sthi.q %1, 7, %0" : : "r" (val), "r" (address));
-                       break;
-
-               default:
-                       printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
-                               width_shift, (unsigned long) regs->pc);
-                       break;
-               }
-       }
-
-       return 0;
-}
-
-#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
-/* Never need to fix up misaligned FPU accesses within the kernel since that's a real
-   error. */
-static int misaligned_fpu_load(struct pt_regs *regs,
-                          __u32 opcode,
-                          int displacement_not_indexed,
-                          int width_shift,
-                          int do_paired_load)
-{
-       /* Return -1 for a fault, 0 for OK */
-       int error;
-       int destreg;
-       __u64 address;
-
-       error = generate_and_check_address(regs, opcode,
-                       displacement_not_indexed, width_shift, &address);
-       if (error < 0) {
-               return error;
-       }
-
-       destreg = (opcode >> 4) & 0x3f;
-       if (user_mode(regs)) {
-               __u64 buffer;
-               __u32 buflo, bufhi;
-
-               if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL<<width_shift)) {
-                       return -1;
-               }
-
-               if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
-                       return -1; /* fault */
-               }
-               /* 'current' may be the current owner of the FPU state, so
-                  context switch the registers into memory so they can be
-                  indexed by register number. */
-               if (last_task_used_math == current) {
-                       grab_fpu();
-                       fpsave(&current->thread.fpu.hard);
-                       release_fpu();
-                       last_task_used_math = NULL;
-                       regs->sr |= SR_FD;
-               }
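-               /* With SR_FD set, the next FPU instruction this task
-                  issues will trap and the state (including the register
-                  written below) is reloaded from memory. */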
-
-               buflo = *(__u32*) &buffer;
-               bufhi = *(1 + (__u32*) &buffer);
-
-               switch (width_shift) {
-               case 2:
-                       current->thread.fpu.hard.fp_regs[destreg] = buflo;
-                       break;
-               case 3:
-                       if (do_paired_load) {
-                               current->thread.fpu.hard.fp_regs[destreg] = buflo;
-                               current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
-                       } else {
-#if defined(CONFIG_LITTLE_ENDIAN)
-                               current->thread.fpu.hard.fp_regs[destreg] = bufhi;
-                               current->thread.fpu.hard.fp_regs[destreg+1] = buflo;
-#else
-                               current->thread.fpu.hard.fp_regs[destreg] = buflo;
-                               current->thread.fpu.hard.fp_regs[destreg+1] = bufhi;
-#endif
-                       }
-                       break;
-               default:
-                       printk("Unexpected width_shift %d in misaligned_fpu_load, PC=%08lx\n",
-                               width_shift, (unsigned long) regs->pc);
-                       break;
-               }
-               return 0;
-       } else {
-               die ("Misaligned FPU load inside kernel", regs, 0);
-               return -1;
-       }
-
-}
-
-static int misaligned_fpu_store(struct pt_regs *regs,
-                          __u32 opcode,
-                          int displacement_not_indexed,
-                          int width_shift,
-                          int do_paired_load)
-{
-       /* Return -1 for a fault, 0 for OK */
-       int error;
-       int srcreg;
-       __u64 address;
-
-       error = generate_and_check_address(regs, opcode,
-                       displacement_not_indexed, width_shift, &address);
-       if (error < 0) {
-               return error;
-       }
-
-       srcreg = (opcode >> 4) & 0x3f;
-       if (user_mode(regs)) {
-               __u64 buffer;
-               /* Initialise these to NaNs. */
-               __u32 buflo=0xffffffffUL, bufhi=0xffffffffUL;
-
-               if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL<<width_shift)) {
-                       return -1;
-               }
-
-               /* 'current' may be the current owner of the FPU state, so
-                  context switch the registers into memory so they can be
-                  indexed by register number. */
-               if (last_task_used_math == current) {
-                       grab_fpu();
-                       fpsave(&current->thread.fpu.hard);
-                       release_fpu();
-                       last_task_used_math = NULL;
-                       regs->sr |= SR_FD;
-               }
-
-               switch (width_shift) {
-               case 2:
-                       buflo = current->thread.fpu.hard.fp_regs[srcreg];
-                       break;
-               case 3:
-                       if (do_paired_load) {
-                               buflo = current->thread.fpu.hard.fp_regs[srcreg];
-                               bufhi = current->thread.fpu.hard.fp_regs[srcreg+1];
-                       } else {
-#if defined(CONFIG_LITTLE_ENDIAN)
-                               bufhi = current->thread.fpu.hard.fp_regs[srcreg];
-                               buflo = current->thread.fpu.hard.fp_regs[srcreg+1];
-#else
-                               buflo = current->thread.fpu.hard.fp_regs[srcreg];
-                               bufhi = current->thread.fpu.hard.fp_regs[srcreg+1];
-#endif
-                       }
-                       break;
-               default:
-                       printk("Unexpected width_shift %d in misaligned_fpu_store, PC=%08lx\n",
-                               width_shift, (unsigned long) regs->pc);
-                       break;
-               }
-
-               *(__u32*) &buffer = buflo;
-               *(1 + (__u32*) &buffer) = bufhi;
-               if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
-                       return -1; /* fault */
-               }
-               return 0;
-       } else {
-               die ("Misaligned FPU store inside kernel", regs, 0);
-               return -1;
-       }
-}
-#endif
-
-static int misaligned_fixup(struct pt_regs *regs)
-{
-       unsigned long opcode;
-       int error;
-       int major, minor;
-
-#if !defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
-       /* Never fixup user mode misaligned accesses without this option enabled. */
-       return -1;
-#else
-       if (!user_mode_unaligned_fixup_enable)
-               return -1;
-#endif
-
-       error = read_opcode(regs->pc, &opcode, user_mode(regs));
-       if (error < 0) {
-               return error;
-       }
-       major = (opcode >> 26) & 0x3f;
-       minor = (opcode >> 16) & 0xf;
-
-#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
-       if (user_mode(regs) && (user_mode_unaligned_fixup_count > 0)) {
-               --user_mode_unaligned_fixup_count;
-               /* Only do 'count' worth of these reports, to remove a potential DoS against syslog */
-               printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
-                      current->comm, task_pid_nr(current), (__u32)regs->pc, opcode);
-       } else
-#endif
-       if (!user_mode(regs) && (kernel_mode_unaligned_fixup_count > 0)) {
-               --kernel_mode_unaligned_fixup_count;
-               if (in_interrupt()) {
-                       printk("Fixing up unaligned kernelspace access in interrupt pc=0x%08x ins=0x%08lx\n",
-                              (__u32)regs->pc, opcode);
-               } else {
-                       printk("Fixing up unaligned kernelspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
-                              current->comm, task_pid_nr(current), (__u32)regs->pc, opcode);
-               }
-       }
-
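-       /* The case labels below are the top opcode byte from the ISA
-          encoding tables, shifted down by 2 to line up with the 6-bit
-          major field extracted above. */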
-       switch (major) {
-               case (0x84>>2): /* LD.W */
-                       error = misaligned_load(regs, opcode, 1, 1, 1);
-                       break;
-               case (0xb0>>2): /* LD.UW */
-                       error = misaligned_load(regs, opcode, 1, 1, 0);
-                       break;
-               case (0x88>>2): /* LD.L */
-                       error = misaligned_load(regs, opcode, 1, 2, 1);
-                       break;
-               case (0x8c>>2): /* LD.Q */
-                       error = misaligned_load(regs, opcode, 1, 3, 0);
-                       break;
-
-               case (0xa4>>2): /* ST.W */
-                       error = misaligned_store(regs, opcode, 1, 1);
-                       break;
-               case (0xa8>>2): /* ST.L */
-                       error = misaligned_store(regs, opcode, 1, 2);
-                       break;
-               case (0xac>>2): /* ST.Q */
-                       error = misaligned_store(regs, opcode, 1, 3);
-                       break;
-
-               case (0x40>>2): /* indexed loads */
-                       switch (minor) {
-                               case 0x1: /* LDX.W */
-                                       error = misaligned_load(regs, opcode, 0, 1, 1);
-                                       break;
-                               case 0x5: /* LDX.UW */
-                                       error = misaligned_load(regs, opcode, 0, 1, 0);
-                                       break;
-                               case 0x2: /* LDX.L */
-                                       error = misaligned_load(regs, opcode, 0, 2, 1);
-                                       break;
-                               case 0x3: /* LDX.Q */
-                                       error = misaligned_load(regs, opcode, 0, 3, 0);
-                                       break;
-                               default:
-                                       error = -1;
-                                       break;
-                       }
-                       break;
-
-               case (0x60>>2): /* indexed stores */
-                       switch (minor) {
-                               case 0x1: /* STX.W */
-                                       error = misaligned_store(regs, opcode, 0, 1);
-                                       break;
-                               case 0x2: /* STX.L */
-                                       error = misaligned_store(regs, opcode, 0, 2);
-                                       break;
-                               case 0x3: /* STX.Q */
-                                       error = misaligned_store(regs, opcode, 0, 3);
-                                       break;
-                               default:
-                                       error = -1;
-                                       break;
-                       }
-                       break;
-
-#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
-               case (0x94>>2): /* FLD.S */
-                       error = misaligned_fpu_load(regs, opcode, 1, 2, 0);
-                       break;
-               case (0x98>>2): /* FLD.P */
-                       error = misaligned_fpu_load(regs, opcode, 1, 3, 1);
-                       break;
-               case (0x9c>>2): /* FLD.D */
-                       error = misaligned_fpu_load(regs, opcode, 1, 3, 0);
-                       break;
-               case (0x1c>>2): /* floating indexed loads */
-                       switch (minor) {
-                       case 0x8: /* FLDX.S */
-                               error = misaligned_fpu_load(regs, opcode, 0, 2, 0);
-                               break;
-                       case 0xd: /* FLDX.P */
-                               error = misaligned_fpu_load(regs, opcode, 0, 3, 1);
-                               break;
-                       case 0x9: /* FLDX.D */
-                               error = misaligned_fpu_load(regs, opcode, 0, 3, 0);
-                               break;
-                       default:
-                               error = -1;
-                               break;
-                       }
-                       break;
-               case (0xb4>>2): /* FST.S */
-                       error = misaligned_fpu_store(regs, opcode, 1, 2, 0);
-                       break;
-               case (0xb8>>2): /* FST.P */
-                       error = misaligned_fpu_store(regs, opcode, 1, 3, 1);
-                       break;
-               case (0xbc>>2): /* FST.D */
-                       error = misaligned_fpu_store(regs, opcode, 1, 3, 0);
-                       break;
-               case (0x3c>>2): /* floating indexed stores */
-                       switch (minor) {
-                       case 0x8: /* FSTX.S */
-                               error = misaligned_fpu_store(regs, opcode, 0, 2, 0);
-                               break;
-                       case 0xd: /* FSTX.P */
-                               error = misaligned_fpu_store(regs, opcode, 0, 3, 1);
-                               break;
-                       case 0x9: /* FSTX.D */
-                               error = misaligned_fpu_store(regs, opcode, 0, 3, 0);
-                               break;
-                       default:
-                               error = -1;
-                               break;
-                       }
-                       break;
-#endif
-
-               default:
-                       /* Fault */
-                       error = -1;
-                       break;
-       }
-
-       if (error < 0) {
-               return error;
-       } else {
-               regs->pc += 4; /* Skip the instruction that's just been emulated */
-               return 0;
-       }
-
-}
-
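-/* These knobs appear under /proc/sys/sh64/unaligned_fixup/: the *_reports
-   counters throttle the printks above, while user_enable (when configured
-   in) switches user-space fixup off altogether, e.g.
-
-       echo 0 > /proc/sys/sh64/unaligned_fixup/user_enable */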
-static ctl_table unaligned_table[] = {
-       {
-               .ctl_name       = CTL_UNNUMBERED,
-               .procname       = "kernel_reports",
-               .data           = &kernel_mode_unaligned_fixup_count,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = &proc_dointvec
-       },
-#if defined(CONFIG_SH64_USER_MISALIGNED_FIXUP)
-       {
-               .ctl_name       = CTL_UNNUMBERED,
-               .procname       = "user_reports",
-               .data           = &user_mode_unaligned_fixup_count,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = &proc_dointvec
-       },
-       {
-               .ctl_name       = CTL_UNNUMBERED,
-               .procname       = "user_enable",
-               .data           = &user_mode_unaligned_fixup_enable,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = &proc_dointvec
-       },
-#endif
-       {}
-};
-
-static ctl_table unaligned_root[] = {
-       {
-               .ctl_name       = CTL_UNNUMBERED,
-               .procname       = "unaligned_fixup",
-               .mode           = 0555,
-               .child          = unaligned_table
-       },
-       {}
-};
-
-static ctl_table sh64_root[] = {
-       {
-               .ctl_name       = CTL_UNNUMBERED,
-               .procname       = "sh64",
-               .mode           = 0555,
-               .child          = unaligned_root
-       },
-       {}
-};
-static struct ctl_table_header *sysctl_header;
-static int __init init_sysctl(void)
-{
-       sysctl_header = register_sysctl_table(sh64_root);
-       return 0;
-}
-
-__initcall(init_sysctl);
-
-asmlinkage void do_debug_interrupt(unsigned long code, struct pt_regs *regs)
-{
-       u64 peek_real_address_q(u64 addr);
-       u64 poke_real_address_q(u64 addr, u64 val);
-       unsigned long long DM_EXP_CAUSE_PHY = 0x0c100010;
-       unsigned long long exp_cause;
-       /* It's not worth ioremapping the debug module registers for the amount
-          of access we make to them - just go direct to their physical
-          addresses. */
-       exp_cause = peek_real_address_q(DM_EXP_CAUSE_PHY);
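-       /* Bit 2 is the only cause this handler expects; anything else
-          latched in DM.EXP_CAUSE gets reported below. */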
-       if (exp_cause & ~4) {
-               printk("DM.EXP_CAUSE had unexpected bits set (=%08lx)\n",
-                       (unsigned long)(exp_cause & 0xffffffff));
-       }
-       show_state();
-       /* Clear all DEBUGINT causes */
-       poke_real_address_q(DM_EXP_CAUSE_PHY, 0x0);
-}